From c6ace7fd88556e945c3bce61f8e982dff0a891ef Mon Sep 17 00:00:00 2001
From: meichangsu1 <1484603386@qq.com>
Date: Wed, 11 Feb 2026 15:41:47 +0800
Subject: [PATCH 1/2] feat(tests): replace manual sp_group retrieval with module attribute

Replace calls to `_get_sp_group_from_device_mesh` with direct access to
`sequence_parallel._sp_group` in the sequence parallel attention tests.
This simplifies the test setup by using the group that is already
initialized and stored in the module, improving code clarity and
removing redundant retrieval logic.
---
 .../test_sequence_parallel_single_attention.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/sequence_parallel/test_sequence_parallel_single_attention.py b/tests/sequence_parallel/test_sequence_parallel_single_attention.py
index dde6b387..32e01aaa 100644
--- a/tests/sequence_parallel/test_sequence_parallel_single_attention.py
+++ b/tests/sequence_parallel/test_sequence_parallel_single_attention.py
@@ -181,7 +181,7 @@ def _run_worker_single_attn(rank: int, world_size: int, port: int, padding: bool
     sp_size = world_size
     device_mesh = DeviceMesh.from_sizes(dp_size=world_size, ulysses_size=sp_size, device_type="cuda")
     _setup_sp(device_mesh, sp_size)
-    sp_group = _get_sp_group_from_device_mesh(device_mesh, sp_size)
+    sp_group = sequence_parallel._sp_group
 
     batch_size = 2
     unpad_seq_len = 127 if padding else 128
@@ -271,7 +271,7 @@ def _run_worker_single_attn_fsdp(rank: int, world_size: int, port: int):
     # For FSDP+SP, SP is derived from dp/fsdp ranks. Use fsdp=world, dp=1.
     device_mesh = DeviceMesh.from_sizes(fsdp_size=world_size, dp_size=1, ulysses_size=sp_size, device_type="cuda")
     _setup_sp(device_mesh, sp_size)
-    sp_group = _get_sp_group_from_device_mesh(device_mesh, sp_size)
+    sp_group = sequence_parallel._sp_group
 
     batch_size = 2
     unpad_seq_len = 128

From 939466f1aafe8d5a213db8f2edeecdb353459f68 Mon Sep 17 00:00:00 2001
From: meichangsu1 <1484603386@qq.com>
Date: Fri, 13 Feb 2026 15:43:14 +0800
Subject: [PATCH 2/2] feat(tests): improve kernel availability check in test_function_kernel

Add imports and a try-except block to verify that the
'kernels-test/flattened-build' kernel can actually be loaded in the
current environment before the test proceeds. This prevents test
failures caused by environment-specific loading issues and produces a
more informative skip message.
---
 tests/kernel/test_function_kernel.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/tests/kernel/test_function_kernel.py b/tests/kernel/test_function_kernel.py
index 66f375ee..fe95bafa 100644
--- a/tests/kernel/test_function_kernel.py
+++ b/tests/kernel/test_function_kernel.py
@@ -54,10 +54,21 @@ def test_flattened_build_replaces_function(self):
             self.skipTest(f'HuggingFace unreachable: {e}')
         try:
             from kernels import has_kernel
+            from kernels._versions import select_revision_or_version
+            from kernels.utils import get_kernel
         except Exception:
             self.skipTest('kernels package missing has_kernel.')
         if not has_kernel('kernels-test/flattened-build'):
             self.skipTest('kernels-test/flattened-build not available.')
+        try:
+            revision = select_revision_or_version(
+                'kernels-test/flattened-build',
+                revision=None,
+                version=None,
+            )
+            get_kernel('kernels-test/flattened-build', revision=revision)
+        except Exception as exc:
+            self.skipTest(f'kernels-test/flattened-build cannot be loaded in this env: {exc}')
 
         _ensure_test_packages()
         module_name = 'tests.kernel._tmp_flattened_build_module'
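A minimal sketch of the module-attribute pattern the first patch relies on, assuming a simplified `sequence_parallel` module. The `_setup_sp`/`_sp_group` names mirror the tests, but the body below is illustrative, not the project's actual implementation:

# Illustrative sketch (not the real sequence_parallel module): _setup_sp
# caches the initialized sequence-parallel process group as a module
# attribute, so tests can read sequence_parallel._sp_group instead of
# re-deriving it from the device mesh.
import torch.distributed as dist

_sp_group = None  # populated exactly once by _setup_sp


def _setup_sp(device_mesh, sp_size):  # device_mesh unused in this sketch
    global _sp_group
    world = dist.get_world_size()
    # dist.new_group must be entered by every rank for every subgroup, so
    # build all sp_size-wide groups and keep the one containing this rank.
    for start in range(0, world, sp_size):
        ranks = list(range(start, start + sp_size))
        group = dist.new_group(ranks=ranks)
        if dist.get_rank() in ranks:
            _sp_group = group

After `_setup_sp(device_mesh, sp_size)` has run, each worker reads `sequence_parallel._sp_group` directly, which is exactly the access pattern both hunks in the first patch switch to.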
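And a self-contained sketch of the skip-guard pattern the second patch introduces. `has_kernel` and `get_kernel` are public `kernels` entry points; the test class name here is hypothetical, and mapping any load failure to a skip follows the patch rather than any `kernels` convention:

# Sketch of the availability guard: probe the hub for the kernel, then
# attempt an actual load so environment-specific failures surface as a
# clean skip instead of a test error.
import unittest


class FlattenedBuildGuard(unittest.TestCase):
    def test_kernel_loads(self):
        try:
            from kernels import get_kernel, has_kernel
        except Exception:
            self.skipTest('kernels package not importable.')
        if not has_kernel('kernels-test/flattened-build'):
            self.skipTest('kernels-test/flattened-build not available.')
        try:
            # get_kernel resolves a revision, downloads the build if
            # needed, and imports it as a Python module.
            kernel = get_kernel('kernels-test/flattened-build')
        except Exception as exc:
            self.skipTest(f'kernel cannot be loaded in this env: {exc}')
        self.assertIsNotNone(kernel)


if __name__ == '__main__':
    unittest.main()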