diff --git a/torch/testing/_internal/common_modules.py b/torch/testing/_internal/common_modules.py
index ffd0e6f95a87d..9ae062df85ae8 100644
--- a/torch/testing/_internal/common_modules.py
+++ b/torch/testing/_internal/common_modules.py
@@ -25,7 +25,7 @@
     nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference, get_reduction)
 from torch.testing._internal.common_utils import (
     freeze_rng_state, set_single_threaded_if_parallel_tbb, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS,
-    skipIfTorchDynamo)
+    skipIfTorchDynamo, TEST_WITH_TORCHINDUCTOR)
 from types import ModuleType
 from typing import List, Tuple, Type, Set, Dict
 import operator
@@ -127,7 +127,7 @@ def _parametrize_test(self, test, generic_cls, device_cls):
             def test_wrapper(*args, **kwargs):
                 return test(*args, **kwargs)

-            if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR:
+            if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR:
                 test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper)

             decorator_fn = partial(module_info.get_decorators, generic_cls.__name__,
@@ -3469,7 +3469,12 @@ def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad
                        unittest.expectedFailure, 'TestEagerFusionModuleInfo',
                        'test_aot_autograd_module_exhaustive',
                        active_if=operator.itemgetter('training')
-                   ),)
+                   ),
+                   # test fails if run alone in inductor https://github.com/pytorch/pytorch/issues/125967
+                   DecorateInfo(
+                       unittest.skip("Skipped https://github.com/pytorch/pytorch/issues/125967"),
+                       'TestModule', 'test_memory_format', device_type='cuda',
+                       active_if=(TEST_WITH_TORCHINDUCTOR)),)
                ),
    ModuleInfo(torch.nn.BatchNorm3d,
               train_and_eval_differ=True,
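
Note: the DecorateInfo entry added in the last hunk only fires when every gate matches: the test class ('TestModule'), the test name ('test_memory_format'), the device type ('cuda'), and the active_if condition (TEST_WITH_TORCHINDUCTOR). Below is a minimal, self-contained sketch of that gating behavior; SimpleDecorateInfo is a hypothetical stand-in for illustration only, not the real DecorateInfo defined in torch.testing._internal.common_methods_invocations.

import unittest

# Hypothetical, simplified stand-in for DecorateInfo's gating logic.
class SimpleDecorateInfo:
    def __init__(self, decorator, cls_name=None, test_name=None,
                 device_type=None, active_if=True):
        self.decorator = decorator
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.active_if = active_if

    def is_active(self, cls_name, test_name, device_type):
        # Every specified gate must match; an unspecified gate matches anything.
        return (self.active_if and
                (self.cls_name is None or self.cls_name == cls_name) and
                (self.test_name is None or self.test_name == test_name) and
                (self.device_type is None or self.device_type == device_type))

# Mirrors the entry added in the diff: skip test_memory_format on CUDA,
# but only when the suite runs under inductor.
TEST_WITH_TORCHINDUCTOR = False  # in torch this is read from the environment
info = SimpleDecorateInfo(
    unittest.skip("Skipped https://github.com/pytorch/pytorch/issues/125967"),
    'TestModule', 'test_memory_format', device_type='cuda',
    active_if=TEST_WITH_TORCHINDUCTOR)

print(info.is_active('TestModule', 'test_memory_format', 'cuda'))  # False here; True only under inductor
print(info.is_active('TestModule', 'test_memory_format', 'cpu'))   # always False: device gate fails

Because the skip is expressed as a DecorateInfo rather than a bare decorator on the test, it stays scoped to this one ModuleInfo entry and leaves the same test active for other modules, devices, and test configurations.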