Enable more unit tests (ROCm 255) (pytorch#12486)
Summary:
* Enable more tests that rely on CPU LAPACK being available at compile time.
* Enable the min/max tests in test_cuda (ROCm 236).
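
The tests touched here were gated with a skipIfRocm decorator; removing it lets them run in the ROCm CI. A minimal sketch of how such a decorator is typically implemented (the flag and environment variable names are assumptions for illustration, not the exact PyTorch helper):

import os
import unittest

# Assumed flag for illustration only; the real helper lives in the test
# suite's common utilities and may read a different environment variable.
TEST_WITH_ROCM = os.environ.get("PYTORCH_TEST_WITH_ROCM", "0") == "1"

def skipIfRocm(fn):
    # Skip the wrapped test whenever the suite runs on the ROCm stack.
    return unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on ROCm")(fn)

class Example(unittest.TestCase):
    @skipIfRocm  # deleting this decorator is what re-enables a test on ROCm
    def test_gated_on_rocm(self):
        self.assertTrue(True)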

bddppq ezyang

Tests ran as part of the ROCm CI here: ROCm#255
Pull Request resolved: pytorch#12486

Differential Revision: D10262534

Pulled By: ezyang

fbshipit-source-id: 167a06fc8232af006f4b33dcc625815fd4b06d6b
iotamudelta authored and gchanan committed Oct 10, 2018
1 parent c0f7bb6 commit 54db2df
Showing 2 changed files with 16 additions and 25 deletions.
11 changes: 4 additions & 7 deletions test/test_autograd.py
@@ -1990,7 +1990,6 @@ def test_cat_empty(self):
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor)

-@skipIfRocm
def test_potrf(self):
root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

@@ -2154,7 +2153,6 @@ def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))

-@skipIfRocm
def test_pinverse(self):
# Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
# 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
@@ -2550,7 +2548,6 @@ def backward(ctx, gO):
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))

-@skipIfRocm
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
@@ -3189,13 +3186,13 @@ class dont_convert(tuple):
'large', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S), (random_fullrank_matrix_distinct_singular_value(S),), '', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S, S), (random_fullrank_matrix_distinct_singular_value(S, S),),
-'batched', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 3, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 3),),
-'batched_dims', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_dims', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 2, S, S), (random_fullrank_matrix_distinct_singular_value(S, 1),),
-'batched_broadcast_A', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_broadcast_A', NO_ARGS, [skipIfNoLapack]),
('gesv', (1, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 2),),
-'batched_broadcast_b', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+'batched_broadcast_b', NO_ARGS, [skipIfNoLapack]),
('fill_', (S, S, S), (1,), 'number'),
('fill_', (), (1,), 'number_scalar'),
# FIXME: we should compute the derivative w.r.t torch.tensor(1)
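The gesv entries above belong to the generated method-test table, whose last field is a list of skip decorators applied to each generated test; dropping skipIfRocm from that list is what turns the batched variants back on under ROCm. A rough sketch of that application step (helper and parameter names are illustrative, not the actual harness code):

def apply_skips(test_fn, decorators):
    # Wrap the generated test with each skip decorator from the entry's
    # final field, e.g. [skipIfNoLapack] after this change.
    for decorator in decorators:
        test_fn = decorator(test_fn)
    return test_fn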
30 changes: 12 additions & 18 deletions test/test_cuda.py
@@ -353,25 +353,23 @@ def tmp(t):
('kthvalue', small_3d_unique, lambda t: [3],),
('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
('kthvalue', small_3d_unique, lambda t: [3, -1], 'neg_dim'),
-('lerp', small_3d, lambda t: [small_3d(t), 0.3], '', types, False, "skipIfRocm:HalfTensor"),
-('max', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-('max', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
-('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
+('lerp', small_3d, lambda t: [small_3d(t), 0.3]),
+('max', small_3d_unique, lambda t: []),
+('max', small_3d_unique, lambda t: [1], 'dim'),
+('max', small_3d_unique, lambda t: [-1], 'neg_dim'),
('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
('min', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('min', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
-('mean', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-('mode', small_3d, lambda t: [], '', types, False, skipIfRocm),
-('mode', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
-('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
-('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half,
-    False, "skipIfRocm:DoubleTensor,FloatTensor"),
-('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half,
-    False, "skipIfRocm:DoubleTensor,FloatTensor"),
+('mean', small_3d, lambda t: []),
+('mean', small_3d, lambda t: [-1], 'neg_dim'),
+('mean', small_3d, lambda t: [1], 'dim'),
+('mode', small_3d, lambda t: []),
+('mode', small_3d, lambda t: [1], 'dim'),
+('mode', small_3d, lambda t: [-1], 'neg_dim'),
+('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half),
+('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half),
('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types),
('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
@@ -977,7 +975,6 @@ def test_broadcast_cpu(self):
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())

-@skipIfRocm
def test_min_max_nan(self):
tests = [(lambda x: x.min(), 'min'),
(lambda x: x.max(), 'max'),
@@ -1743,7 +1740,6 @@ def test_tensor_scatterAdd(self):
def test_tensor_scatterFill(self):
TestTorch._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', True, test_bounds=False)

-@skipIfRocm
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
@@ -1757,11 +1753,9 @@ def test_min_max_inits(self):
_, v = y.min(dim=0)
self.assertEqual(v, expected)

-@skipIfRocm
def test_max_with_inf(self):
TestTorch._test_max_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

-@skipIfRocm
def test_min_with_inf(self):
TestTorch._test_min_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

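For reference, the newly enabled test_min_max_inits checks that reductions report correct indices at extreme input values. A hedged sketch of an equivalent check (the dtypes and values here are assumptions, not the exact test body):

import torch

if torch.cuda.is_available():
    x = torch.tensor([0], dtype=torch.uint8, device="cuda")
    y = torch.tensor([255], dtype=torch.uint8, device="cuda")
    # max over the smallest byte value (and min over the largest) must still
    # return index 0, not a stale initialization value from the reduction.
    _, idx = x.max(dim=0)
    assert idx.item() == 0
    _, idx = y.min(dim=0)
    assert idx.item() == 0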
