Enable more unit tests #255

Merged
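For context: `skipIfRocm` is the decorator in PyTorch's shared test utilities that skips a test when the suite runs against a ROCm build, and this PR re-enables the tests below by deleting it. A minimal sketch of how such a skip decorator is typically implemented, assuming a `PYTORCH_TEST_WITH_ROCM` environment flag (illustrative only, not the actual helper from the test suite):

```python
import os
import unittest
from functools import wraps

# Assumption for illustration: ROCm test runs are flagged through an environment variable.
TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'


def skipIfRocm(fn):
    """Skip the wrapped test when the suite is running against a ROCm build."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        return fn(*args, **kwargs)
    return wrapper
```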
11 changes: 4 additions & 7 deletions test/test_autograd.py
@@ -1990,7 +1990,6 @@ def test_cat_empty(self):
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor)

-@skipIfRocm
def test_potrf(self):
root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

@@ -2150,7 +2149,6 @@ def run_test(input_size, exponent):
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)

-@skipIfRocm
def test_pinverse(self):
# Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
# 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
@@ -2546,7 +2544,6 @@ def backward(ctx, gO):
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))

-@skipIfRocm
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
@@ -3185,13 +3182,13 @@ class dont_convert(tuple):
'large', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S), (random_fullrank_matrix_distinct_singular_value(S),), '', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S, S), (random_fullrank_matrix_distinct_singular_value(S, S),),
-'batched', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 3, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 3),),
-'batched_dims', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_dims', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 2, S, S), (random_fullrank_matrix_distinct_singular_value(S, 1),),
-'batched_broadcast_A', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_broadcast_A', NO_ARGS, [skipIfNoLapack]),
('gesv', (1, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 2),),
-'batched_broadcast_b', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_broadcast_b', NO_ARGS, [skipIfNoLapack]),
('fill_', (S, S, S), (1,), 'number'),
('fill_', (), (1,), 'number_scalar'),
# FIXME: we should compute the derivative w.r.t torch.tensor(1)
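The tuples changed above come from the declarative method-test table in test_autograd.py: roughly (op name, input sizes, call args, subtest suffix, arg indices, decorators). Dropping `skipIfRocm` from the final element means the generated `test_gesv_batched*` variants now also run on ROCm. A rough, hypothetical sketch of how such a table can be expanded into test methods (the loop and names below are assumptions, not the actual harness code):

```python
import unittest

# Hypothetical, simplified version of the declarative table format shown in the diff:
# (name, input_sizes, call_args, subtest_suffix, arg_indices, decorators)
method_tests = [
    ('gesv', (5, 5), (), '', None, []),
    ('gesv', (5, 5, 5), (), 'batched', None, []),
]


class TestAutogradGenerated(unittest.TestCase):
    pass


for name, sizes, call_args, suffix, _, decorators in method_tests:
    test_name = 'test_' + name + ('_' + suffix if suffix else '')

    def make_test(name=name, sizes=sizes, call_args=call_args):
        def do_test(self):
            # The real harness builds tensors of the given sizes, calls torch.<name>,
            # and verifies gradients with gradcheck; that machinery is omitted here.
            self.assertTrue(True)
        return do_test

    fn = make_test()
    # Every decorator listed in the final tuple element (skipIfNoLapack, skipIfRocm, ...)
    # wraps the generated test, so deleting skipIfRocm re-enables the test on ROCm builds.
    for decorator in decorators:
        fn = decorator(fn)
    setattr(TestAutogradGenerated, test_name, fn)

if __name__ == '__main__':
    unittest.main()
```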
30 changes: 12 additions & 18 deletions test/test_cuda.py
@@ -353,25 +353,23 @@ def tmp(t):
('kthvalue', small_3d_unique, lambda t: [3],),
('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
('kthvalue', small_3d_unique, lambda t: [3, -1], 'neg_dim'),
-('lerp', small_3d, lambda t: [small_3d(t), 0.3], '', types, False, "skipIfRocm:HalfTensor"),
-('max', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-('max', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
-('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
+('lerp', small_3d, lambda t: [small_3d(t), 0.3]),
+('max', small_3d_unique, lambda t: []),
+('max', small_3d_unique, lambda t: [1], 'dim'),
+('max', small_3d_unique, lambda t: [-1], 'neg_dim'),
('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
('min', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('min', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
-('mean', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-('mode', small_3d, lambda t: [], '', types, False, skipIfRocm),
-('mode', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
-('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
-('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half,
-False, "skipIfRocm:DoubleTensor,FloatTensor"),
-('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half,
-False, "skipIfRocm:DoubleTensor,FloatTensor"),
+('mean', small_3d, lambda t: []),
+('mean', small_3d, lambda t: [-1], 'neg_dim'),
+('mean', small_3d, lambda t: [1], 'dim'),
+('mode', small_3d, lambda t: []),
+('mode', small_3d, lambda t: [1], 'dim'),
+('mode', small_3d, lambda t: [-1], 'neg_dim'),
+('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half),
+('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half),
('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types),
('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
@@ -977,7 +975,6 @@ def test_broadcast_cpu(self):
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())

-@skipIfRocm
def test_min_max_nan(self):
tests = [(lambda x: x.min(), 'min'),
(lambda x: x.max(), 'max'),
@@ -1743,7 +1740,6 @@ def test_tensor_scatterAdd(self):
def test_tensor_scatterFill(self):
TestTorch._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', True, test_bounds=False)

-@skipIfRocm
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
@@ -1757,11 +1753,9 @@ def test_min_max_inits(self):
_, v = y.min(dim=0)
self.assertEqual(v, expected)

-@skipIfRocm
def test_max_with_inf(self):
TestTorch._test_max_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

-@skipIfRocm
def test_min_with_inf(self):
TestTorch._test_min_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

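The test_cuda.py entries follow a similar declarative pattern, roughly (op name, tensor constructor, argument constructor, optional subtest suffix, optional type list, ..., optional skip spec). The shortened tuples simply drop the trailing fields, including the `skipIfRocm` specification, so the op is exercised for every listed type on ROCm as well. A hypothetical illustration of how the trailing fields might default (field names and defaults are assumptions about the harness, not its actual code):

```python
# Hypothetical illustration of how the shortened tuples above can still satisfy the
# full (name, constructor, arg_constructor, subtest, types, no_inplace, skip) shape;
# the field names and defaults are assumptions, not the actual test_cuda.py harness.
ALL_TYPES = ['FloatTensor', 'DoubleTensor', 'HalfTensor']  # illustrative placeholder


def normalize_entry(entry):
    """Pad a short test tuple out to the full seven-field form."""
    defaults = ('', ALL_TYPES, False, None)  # subtest, types, no_inplace, skip condition
    name, constructor, arg_constructor, *rest = entry
    rest = tuple(rest) + defaults[len(rest):]
    return (name, constructor, arg_constructor) + rest


# The shortened 'mean' entry from the diff carries no skip condition at all...
short = ('mean', lambda t: t, lambda t: [])
# ...so after padding it is tested for every type, ROCm included.
print(normalize_entry(short))
```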