diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py
index e1c28dd418..4d50db8438 100644
--- a/monai/config/deviceconfig.py
+++ b/monai/config/deviceconfig.py
@@ -200,8 +200,7 @@ def get_gpu_info() -> OrderedDict:
 
     if num_gpus > 0:
         _dict_append(output, "Current device", torch.cuda.current_device)
-        if hasattr(torch.cuda, "get_arch_list"):  # get_arch_list is new in torch 1.7.1
-            _dict_append(output, "Library compiled for CUDA architectures", torch.cuda.get_arch_list)
+        _dict_append(output, "Library compiled for CUDA architectures", torch.cuda.get_arch_list)
 
     for gpu in range(num_gpus):
         gpu_info = torch.cuda.get_device_properties(gpu)
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index ce8b9262b4..c5fe05d220 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -405,7 +405,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
             raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
 
         if isinstance(img, torch.Tensor):
-            if hasattr(torch, "isin"):
+            if hasattr(torch, "isin"):  # `isin` is new in torch 1.10.0
                 appl_lbls = torch.as_tensor(self.applied_labels, device=img.device)
                 return torch.where(torch.isin(img, appl_lbls), img, torch.tensor(0.0).to(img))
             else:
diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py
index ca683c8824..739d98e5c0 100644
--- a/monai/transforms/utils.py
+++ b/monai/transforms/utils.py
@@ -1306,7 +1306,7 @@ def shift_fourier(x: NdarrayOrTensor, spatial_dims: int, n_dims: Optional[int] =
     dims = tuple(range(-spatial_dims, 0))
     k: NdarrayOrTensor
     if isinstance(x, torch.Tensor):
-        if hasattr(torch.fft, "fftshift"):
+        if hasattr(torch.fft, "fftshift"):  # `fftshift` is new in torch 1.8.0
             k = torch.fft.fftshift(torch.fft.fftn(x, dim=dims), dim=dims)
         else:
             # if using old PyTorch, will convert to numpy array and return
@@ -1339,7 +1339,7 @@ def inv_shift_fourier(k: NdarrayOrTensor, spatial_dims: int, n_dims: Optional[in
     dims = tuple(range(-spatial_dims, 0))
     out: NdarrayOrTensor
     if isinstance(k, torch.Tensor):
-        if hasattr(torch.fft, "ifftshift"):
+        if hasattr(torch.fft, "ifftshift"):  # `ifftshift` is new in torch 1.8.0
             out = torch.fft.ifftn(torch.fft.ifftshift(k, dim=dims), dim=dims, norm="backward").real
         else:
             # if using old PyTorch, will convert to numpy array and return
diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py
index 862a2458f8..0cdec8226c 100644
--- a/monai/transforms/utils_pytorch_numpy_unification.py
+++ b/monai/transforms/utils_pytorch_numpy_unification.py
@@ -42,7 +42,7 @@
 def moveaxis(x: NdarrayOrTensor, src: int, dst: int) -> NdarrayOrTensor:
     """`moveaxis` for pytorch and numpy, using `permute` for pytorch ver < 1.8"""
     if isinstance(x, torch.Tensor):
-        if hasattr(torch, "moveaxis"):
+        if hasattr(torch, "moveaxis"):  # `moveaxis` is new in torch 1.8.0
             return torch.moveaxis(x, src, dst)
         return _moveaxis_with_permute(x, src, dst)  # type: ignore
     if isinstance(x, np.ndarray):
@@ -110,7 +110,7 @@ def percentile(x: NdarrayOrTensor, q, dim: Optional[int] = None) -> Union[Ndarra
         result = np.percentile(x, q, axis=dim)
     else:
         q = torch.tensor(q, device=x.device)
-        if hasattr(torch, "quantile"):
+        if hasattr(torch, "quantile"):  # `quantile` is new in torch 1.7.0
             result = torch.quantile(x, q / 100.0, dim=dim)
         else:
             # Note that ``kthvalue()`` works one-based, i.e., the first sorted value
@@ -222,7 +222,7 @@ def ravel(x: NdarrayOrTensor):
         Return a contiguous flattened array/tensor.
     """
     if isinstance(x, torch.Tensor):
-        if hasattr(torch, "ravel"):
+        if hasattr(torch, "ravel"):  # `ravel` is new in torch 1.8.0
             return x.ravel()
         return x.flatten().contiguous()
     return np.ravel(x)
@@ -268,7 +268,7 @@ def maximum(a: NdarrayOrTensor, b: NdarrayOrTensor) -> NdarrayOrTensor:
     """
     if isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor):
         # is torch and has torch.maximum (pt>1.6)
-        if hasattr(torch, "maximum"):
+        if hasattr(torch, "maximum"):  # `maximum` is new in torch 1.7.0
             return torch.maximum(a, b)
         return torch.stack((a, b)).max(dim=0)[0]
     return np.maximum(a, b)
diff --git a/monai/utils/misc.py b/monai/utils/misc.py
index 1e328c8ad4..4fe63744fd 100644
--- a/monai/utils/misc.py
+++ b/monai/utils/misc.py
@@ -258,9 +258,9 @@ def set_determinism(
         torch.backends.cudnn.deterministic = _flag_deterministic
         torch.backends.cudnn.benchmark = _flag_cudnn_benchmark
     if use_deterministic_algorithms is not None:
-        if hasattr(torch, "use_deterministic_algorithms"):
+        if hasattr(torch, "use_deterministic_algorithms"):  # `use_deterministic_algorithms` is new in torch 1.8.0
             torch.use_deterministic_algorithms(use_deterministic_algorithms)
-        elif hasattr(torch, "set_deterministic"):
+        elif hasattr(torch, "set_deterministic"):  # `set_deterministic` is new in torch 1.7.0
             torch.set_deterministic(use_deterministic_algorithms)  # type: ignore
         else:
             warnings.warn("use_deterministic_algorithms=True, but PyTorch version is too old to set the mode.")
diff --git a/tests/test_mmar_download.py b/tests/test_mmar_download.py
index 2a4ca0498f..2cae5969db 100644
--- a/tests/test_mmar_download.py
+++ b/tests/test_mmar_download.py
@@ -22,7 +22,7 @@
 from monai.apps import RemoteMMARKeys, download_mmar, get_model_spec, load_from_mmar
 from monai.apps.mmars import MODEL_DESC
 from monai.apps.mmars.mmars import _get_val
-from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick
+from tests.utils import skip_if_quick
 
 TEST_CASES = [["clara_pt_prostate_mri_segmentation_1"], ["clara_pt_covid19_ct_lesion_segmentation_1"]]
 TEST_EXTRACT_CASES = [
@@ -104,7 +104,6 @@ class TestMMMARDownload(unittest.TestCase):
     @parameterized.expand(TEST_CASES)
     @skip_if_quick
-    @SkipIfBeforePyTorchVersion((1, 6))
     def test_download(self, idx):
         try:
             # test model specification