diff --git a/examples/device/cublasdx_fp64_emulation.py b/examples/device/cublasdx_fp64_emulation.py
index 81595ed..a5654f9 100644
--- a/examples/device/cublasdx_fp64_emulation.py
+++ b/examples/device/cublasdx_fp64_emulation.py
@@ -23,7 +23,7 @@
 import numpy as np
 from numba import int32, int8, int16, float64, int64, types
 from numba.types import Tuple
-import cuda.cooperative.experimental as cudax
+import cuda.cccl.cooperative.experimental as cudax
 
 from common import mm_perf_GFlops, random_real
 from common_numba import time_numba
diff --git a/tests/example_tests/device_tests/test_device_samples.py b/tests/example_tests/device_tests/test_device_samples.py
index 375f74c..220435b 100644
--- a/tests/example_tests/device_tests/test_device_samples.py
+++ b/tests/example_tests/device_tests/test_device_samples.py
@@ -24,7 +24,7 @@ def test_sample(self, sample):
             # are using global memory alignment in the sample.
             pytest.skip("Skipping test for cublasdx_device_gemm_performance.py, requires libmathdx >= 0.2.1")
         if os.path.basename(sample) == "cublasdx_fp64_emulation.py":
-            spec = importlib.util.find_spec("cuda.cooperative")
+            spec = importlib.util.find_spec("cuda.cccl.cooperative")
             if spec is None:
-                pytest.skip("Skipping test for cublasdx_fp64_emulation.py, requires cuda.cooperative module")
+                pytest.skip("Skipping test for cublasdx_fp64_emulation.py, requires cuda.cccl.cooperative module")
         run_sample(samples_path, sample, {"__name__": "__main__"})
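
For context, a minimal standalone sketch of the optional-dependency guard the patched test relies on (not part of the patch itself; the test name and body here are placeholders, only the module name comes from the diff). One caveat worth knowing: `importlib.util.find_spec` on a dotted name imports the parent packages first, so this assumes `cuda` and `cuda.cccl` are themselves importable.

```python
# Sketch of the guard pattern used in test_device_samples.py:
# find_spec returns None when the module cannot be located, so the
# test skips cleanly instead of failing with an ImportError.
import importlib.util

import pytest


def test_fp64_emulation_sample():
    spec = importlib.util.find_spec("cuda.cccl.cooperative")
    if spec is None:
        pytest.skip("requires the cuda.cccl.cooperative module")
    # Placeholder; the real test runs the sample via run_sample(...).
    assert spec.name == "cuda.cccl.cooperative"
```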