From df86e133a046423246968f77717f2df027c14991 Mon Sep 17 00:00:00 2001
From: pinzhenx
Date: Tue, 30 Jun 2020 12:04:58 +0000
Subject: [PATCH] mark expected failure test cases

---
 tests/cpu/common_nn.py              | 1 -
 tests/cpu/test_bf16_lazy_reorder.py | 2 --
 tests/cpu/test_rn50_cpu_ops.py      | 2 +-
 tests/cpu/test_torch.py             | 5 +++--
 4 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/tests/cpu/common_nn.py b/tests/cpu/common_nn.py
index 28c2f4acd..4d894dadf 100644
--- a/tests/cpu/common_nn.py
+++ b/tests/cpu/common_nn.py
@@ -63,7 +63,6 @@
 from torch.nn.functional import _Reduction
 from common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \
     TEST_WITH_ROCM
-from common_cuda import TEST_CUDA
 from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors
 from torch.autograd import Variable
 import torch.backends.cudnn
diff --git a/tests/cpu/test_bf16_lazy_reorder.py b/tests/cpu/test_bf16_lazy_reorder.py
index c9b65e588..9a3e4286d 100644
--- a/tests/cpu/test_bf16_lazy_reorder.py
+++ b/tests/cpu/test_bf16_lazy_reorder.py
@@ -80,7 +80,6 @@ def test_batch_norm2d(self):
         self.assertEqual(res_bf16.dtype, torch.bfloat16)
 
         with AutoMixPrecision(True):
-            ipex.core.enable_mix_bf16_fp32()
             self.assertEqual(x_auto_mix.dtype, torch.float)
             self.assertFalse(ipex.core.is_bf16_dil_tensor(x_auto_mix))
             res_auto_mix = bn_auto_mix(x_auto_mix)
@@ -108,7 +107,6 @@ def test_batch_norm3d(self):
         self.assertEqual(x_man_bf16.dtype, torch.bfloat16)
 
         with AutoMixPrecision(True):
-            ipex.core.enable_mix_bf16_fp32()
             self.assertEqual(x_auto_mix.dtype, torch.float)
             self.assertFalse(ipex.core.is_bf16_dil_tensor(x_auto_mix))
             res_auto_mix = bn_auto_mix(x_auto_mix)
diff --git a/tests/cpu/test_rn50_cpu_ops.py b/tests/cpu/test_rn50_cpu_ops.py
index 8d8097b99..566e4eeac 100644
--- a/tests/cpu/test_rn50_cpu_ops.py
+++ b/tests/cpu/test_rn50_cpu_ops.py
@@ -827,7 +827,7 @@ def test_avg_pool3d_with_zero_divisor(self):
         self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
             lambda: torch.nn.functional.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))
 
-    @unittest.skip("oneDNN does not support this case")
+    @unittest.expectedFailure
     def test_max_pool_nan(self):
         for adaptive in ['', 'adaptive_']:
             for num_dim in [1, 2, 3]:
diff --git a/tests/cpu/test_torch.py b/tests/cpu/test_torch.py
index 02a6db3e2..45d6dca90 100644
--- a/tests/cpu/test_torch.py
+++ b/tests/cpu/test_torch.py
@@ -1581,6 +1581,7 @@ def _test_multinomial_invalid_probs(probs):
         except RuntimeError as e:
             return 'invalid multinomial distribution' in str(e)
 
+    @slowTest
     @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
         don't support multiprocessing with spawn start method")
     @unittest.skipIf(IS_WINDOWS, 'FIXME: CUDA OOM error on Windows')
@@ -10265,7 +10266,7 @@ def test_unfold_scalars(self, device):
         self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
         self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))
 
-    @unittest.skipIf(SKIP_TEST_CASE_FOR_DPCPP_STORAGE, "IPEX does not support copy")
+    @unittest.expectedFailure
     def test_copy_all_dtypes_and_devices(self, device):
         from copy import copy
         ipex.get_auto_optimization()
@@ -12835,7 +12836,7 @@ def transformation_fn(tensor, **kwargs):
             self._test_memory_format_transformations(
                 device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)
 
-    @unittest.skipIf(SKIP_TEST_CASE_FOR_DPCPP_STORAGE, "IPEX feature limitation")
+    @unittest.expectedFailure
     def test_memory_format_clone(self, device):
         def get_generator(memory_format, shape):
             def input_generator_fn(device):
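
Note (not part of the patch): a minimal sketch of the behavior this patch relies on, using hypothetical test names. Unlike @unittest.skip, a test marked @unittest.expectedFailure still executes on every run; it is reported as an expected failure while the underlying limitation persists, and flips to an unexpected success (a failure of the suite) as soon as the limitation is fixed, so the stale marker cannot be forgotten.

    import unittest

    class DecoratorBehavior(unittest.TestCase):
        @unittest.expectedFailure
        def test_known_backend_limitation(self):
            # Still runs; reported as an expected failure while broken,
            # and as an unexpected success once the backend is fixed.
            self.assertTrue(False)

        @unittest.skip("never executes, so a later fix goes unnoticed")
        def test_skipped_limitation(self):
            self.assertTrue(False)

    if __name__ == '__main__':
        unittest.main()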