test/test_binary_ufuncs.py (4 changes: 3 additions & 1 deletion)

@@ -20,7 +20,7 @@
 from torch.testing._internal.common_device_type import (
     expectedFailureMeta, instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
     dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyNativeDeviceTypes,
-    skipCUDAIfRocm, skipIf, ops, OpDTypes)
+    skipCUDAIfRocm, skipIf, ops, OpDTypes, skipMeta)
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
     all_types_and_complex_and, integral_types_and, get_all_dtypes, get_all_int_dtypes, get_all_math_dtypes,
@@ -1497,6 +1497,7 @@ def test_complex_scalar_pow_tensor(self, device, dtype):
         self._test_pow(base, second_exp)
 
     @onlyNativeDeviceTypes
+    @skipMeta
     def test_pow_scalar_type_promotion(self, device):
         # Test against a scalar and non-scalar input
         inputs = [17, [17]]
@@ -3393,6 +3394,7 @@ def test_empty_x(sizes, dim, x, device):
                 TypeError, 'received an invalid combination of arguments'):
             actual = torch.cumulative_trapezoid(torch.randn((3, 3)), x=torch.randn((3, 3)), dx=3)
 
+    @skipMeta
     @dtypes(torch.double)
     def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
         sz = 3
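Note: skipMeta (imported above from torch.testing._internal.common_device_type) marks a device-generic test to be skipped when it is instantiated for the "meta" device. A minimal sketch of the pattern, assuming the standard PyTorch device-type test harness; the class and test names below are hypothetical, not part of this PR:

    # Hypothetical example: a device-generic test skipped on the meta device.
    import torch
    from torch.testing._internal.common_device_type import (
        instantiate_device_type_tests, skipMeta)
    from torch.testing._internal.common_utils import TestCase, run_tests

    class TestPowExample(TestCase):
        @skipMeta  # meta tensors carry shape/dtype only, so value checks cannot run
        def test_pow_values(self, device):
            t = torch.full((3,), 2.0, device=device)
            self.assertEqual(torch.pow(t, 2), torch.full((3,), 4.0, device=device))

    # Generates TestPowExampleCPU, TestPowExampleCUDA, ... per available device.
    instantiate_device_type_tests(TestPowExample, globals())

    if __name__ == '__main__':
        run_tests()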
test/test_modules.py (3 changes: 2 additions & 1 deletion)

@@ -8,7 +8,7 @@
 
 import torch
 from torch.testing._internal.common_device_type import (
-    instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol)
+    instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
 from torch.testing._internal.common_modules import module_db, modules
 from torch.testing._internal.common_utils import (
     TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck, gradgradcheck)
@@ -233,6 +233,7 @@ def test_pickle(self, device, dtype, module_info):
 
     @modules([module_info for module_info in module_db
               if 'inplace' in signature(module_info.module_cls).parameters])
+    @skipMeta
     def test_check_inplace(self, device, dtype, module_info):
         # Check if the inplace variant of the module gives the same result as the out of place
         # variant.
test/test_tensor_creation_ops.py (56 changes: 32 additions & 24 deletions)

@@ -14,7 +14,7 @@
 from torch.testing._internal.common_utils import (
     TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
     torch_to_numpy_dtype_dict, slowTest,
-    TEST_SCIPY, IS_MACOS, IS_PPC, IS_WINDOWS)
+    TEST_SCIPY, IS_MACOS, IS_PPC, IS_WINDOWS, parametrize)
 from torch.testing._internal.common_device_type import (
     expectedFailureMeta, instantiate_device_type_tests, deviceCountAtLeast, onlyNativeDeviceTypes,
     onlyCPU, largeTensorTest, precisionOverride, dtypes,
@@ -2786,36 +2786,44 @@ def test_tensor_ctor_device_inference(self, device):
             sparse_size, dtype=torch.float64)
         self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
 
+    def _test_signal_window_functions(self, name, dtype, device, **kwargs):
+        import scipy.signal as signal
+
+        torch_method = getattr(torch, name + '_window')
+        if not dtype.is_floating_point:
+            with self.assertRaisesRegex(RuntimeError, r'floating point'):
+                torch_method(3, dtype=dtype)
+            return
+        for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
+            for periodic in [True, False]:
+                res = torch_method(size, periodic=periodic, **kwargs, device=device, dtype=dtype)
+                # NB: scipy always returns a float64 result
+                ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
+                self.assertEqual(res, ref, exact_dtype=False)
+        with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
+            torch_method(3, layout=torch.sparse_coo)
+        self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
+        self.assertFalse(torch_method(3).requires_grad)
+
     @onlyNativeDeviceTypes
     @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
     @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
     @dtypes(torch.float, torch.double, torch.long)
-    def test_signal_window_functions(self, device, dtype):
-        import scipy.signal as signal
-
-        def test(name, kwargs):
-            torch_method = getattr(torch, name + '_window')
-            if not dtype.is_floating_point:
-                with self.assertRaisesRegex(RuntimeError, r'floating point'):
-                    torch_method(3, dtype=dtype)
-                return
-            for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
-                for periodic in [True, False]:
-                    res = torch_method(size, periodic=periodic, **kwargs, device=device, dtype=dtype)
-                    # NB: scipy always returns a float64 result
-                    ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
-                    self.assertEqual(res, ref, exact_dtype=False)
-            with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
-                torch_method(3, layout=torch.sparse_coo)
-            self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
-            self.assertFalse(torch_method(3).requires_grad)
-
-        for window in ['hann', 'hamming', 'bartlett', 'blackman']:
-            test(window, kwargs={})
+    @parametrize("window", ['hann', 'hamming', 'bartlett', 'blackman'])
+    def test_signal_window_functions(self, device, dtype, window):
+        self._test_signal_window_functions(window, dtype, device)
 
+    @onlyNativeDeviceTypes
+    # See https://github.com/pytorch/pytorch/issues/72630
+    @skipMeta
+    @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
+    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
+    @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
+    @dtypes(torch.float, torch.double, torch.long)
+    def test_kaiser_window(self, device, dtype):
         for num_test in range(50):
-            test('kaiser', kwargs={'beta': random.random() * 30})
+            self._test_signal_window_functions('kaiser', dtype, device, beta=random.random() * 30)
 
     def test_tensor_factories_empty(self, device):
         # ensure we can create empty tensors from each factory function
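Note: the refactor above hoists the inner test closure into a shared _test_signal_window_functions helper, turns the hand-rolled window loop into @parametrize (so each window runs and reports as its own test case), and moves the kaiser checks into a separate test_kaiser_window that is skipped on the meta device with a pointer to issue 72630. A small sketch of the @parametrize pattern, under the assumption that the internal decorator expands one test method into one case per value; the class and test names below are hypothetical:

    # Hypothetical example: one generated test case per window name.
    import torch
    from torch.testing._internal.common_device_type import instantiate_device_type_tests
    from torch.testing._internal.common_utils import TestCase, parametrize, run_tests

    class TestWindowExample(TestCase):
        @parametrize("window", ['hann', 'hamming', 'bartlett', 'blackman'])
        def test_window_is_symmetric(self, device, window):
            # With periodic=False these windows are symmetric about their center.
            w = getattr(torch, window + '_window')(8, periodic=False, device=device)
            self.assertEqual(w, w.flip(0))

    instantiate_device_type_tests(TestWindowExample, globals())

    if __name__ == '__main__':
        run_tests()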
test/test_testing.py (5 changes: 2 additions & 3 deletions)

@@ -574,11 +574,10 @@ def test_unknown_layout(self):
 
     def test_meta(self):
         actual = torch.empty((2, 2), device="meta")
-        expected = actual.clone()
+        expected = torch.empty((2, 2), device="meta")
 
         for fn in assert_close_with_inputs(actual, expected):
-            with self.assertRaisesRegex(NotImplementedError, "meta"):
-                fn()
+            fn()
 
     def test_mismatching_layout(self):
         strided = torch.empty((2, 2))
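Note: the test_meta change flips the expectation for torch.testing.assert_close on meta tensors: the comparison previously raised NotImplementedError and is now expected to succeed. Since meta tensors allocate no storage, there are no values to compare, only metadata such as shape, dtype, and device. The snippet below illustrates that reading; the pass/fail behavior shown is an assumption tied to the state of this PR:

    import torch

    actual = torch.empty((2, 2), device="meta")
    expected = torch.empty((2, 2), device="meta")  # same shape/dtype, no values
    torch.testing.assert_close(actual, expected)   # passes after this change

    # A clone of a meta tensor is itself a meta tensor, which is why the old
    # test could build `expected` as actual.clone(); constructing it directly
    # makes the two inputs' independence explicit.
    assert actual.clone().device.type == "meta"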