Merge pull request pytorch#119 from jithunnair-amd/enable_test_sparse
Enable test_sparse unit tests for ROCm builds in CI
iotamudelta authored Aug 13, 2018
2 parents 6eef9c2 + ef1586f commit b678ab2
Showing 2 changed files with 26 additions and 2 deletions.
1 change: 0 additions & 1 deletion test/run_test.py
@@ -50,7 +50,6 @@
     'multiprocessing',
     'nccl',
     'nn',
-    'sparse',
     'utils',
 ]

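Removing 'sparse' from this exclusion list is what actually turns the suite back on for ROCm CI. As a rough sketch of how run_test.py plausibly consumes such a list — the names TESTS, ROCM_EXCLUDED, and selected_tests below are illustrative assumptions, not the actual run_test.py code:

    # Hypothetical sketch: filter the test suites on ROCm builds.
    # All names here are assumptions for illustration only.
    import os

    TESTS = ['autograd', 'cuda', 'multiprocessing', 'nccl', 'nn', 'sparse', 'utils']
    ROCM_EXCLUDED = ['multiprocessing', 'nccl', 'nn', 'utils']  # 'sparse' no longer listed

    def selected_tests():
        # On ROCm builds, drop every suite that is still excluded.
        if os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1':
            return [t for t in TESTS if t not in ROCM_EXCLUDED]
        return list(TESTS)
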
27 changes: 26 additions & 1 deletion test/test_sparse.py
@@ -5,7 +5,7 @@
 import functools
 import random
 import unittest
-from common import TestCase, run_tests
+from common import TestCase, run_tests, skipIfRocm
 from common_cuda import TEST_CUDA
 from test_torch import TestTorch
 from numbers import Number
@@ -107,6 +107,7 @@ def randn(self, *args, **kwargs):
         # TODO: Put this in torch.cuda.randn
         return self.ValueTensor(*args, **kwargs).normal_()

+    @skipIfRocm
     def test_basic(self):
         x, i, v = self._gen_sparse(3, 10, 100)

@@ -155,6 +156,7 @@ def test_ctor_size_checks(self):
             RuntimeError,
             lambda: self.SparseTensor(indices, values, torch.Size([2, 4, 2, 1])))

+    @skipIfRocm
     def test_to_dense(self):
         i = self.IndexTensor([
             [0, 1, 2, 2],
@@ -184,6 +186,7 @@ def test_to_dense(self):
         self.assertEqual(res, x.to_dense())
         self.assertEqual(res, self.safeToDense(x))

+    @skipIfRocm
     def test_shared(self):
         i = self.IndexTensor([[2]])
         v = self.ValueTensor([5])
@@ -193,6 +196,7 @@
         i[0][0] = 0
         self.assertEqual(self.ValueTensor([6, 0, 0]), self.safeToDense(x))

+    @skipIfRocm
     def test_to_dense_hybrid(self):
         i = self.IndexTensor([
             [0, 1, 2, 2],
@@ -221,6 +225,7 @@ def test_to_dense_hybrid(self):
         self.assertEqual(res, x.to_dense())
         self.assertEqual(res, self.safeToDense(x))

+    @skipIfRocm
     def test_contig(self):
         i = self.IndexTensor([
             [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
@@ -274,6 +279,7 @@ def test_contig(self):
         self.assertEqual(exp_i, x._indices())
         self.assertEqual(exp_v, x._values())

+    @skipIfRocm
     def test_contig_hybrid(self):
         i = self.IndexTensor([
             [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
@@ -333,6 +339,7 @@ def test_contig_hybrid(self):
         self.assertEqual(exp_i, x._indices())
         self.assertEqual(exp_v, x._values())

+    @skipIfRocm
     def test_clone(self):
         x, _, _ = self._gen_sparse(4, 20, 5)
         if self.is_uncoalesced:
@@ -354,6 +361,7 @@ def test_cuda_empty(self):
         self.assertEqual(y._sparseDims(), x._sparseDims())
         self.assertEqual(y._denseDims(), x._denseDims())

+    @skipIfRocm
     def test_transpose(self):
         x = self._gen_sparse(4, 20, 5)[0]
         y = self.safeToDense(x)
@@ -367,6 +375,7 @@ def test_transpose(self):
             y = y.transpose(i, j)
             self.assertEqual(self.safeToDense(x), y)

+    @skipIfRocm
     def test_transpose_coalesce_invariant(self):
         # If a sparse tensor is coalesced, its transpose should be the same
         # If a sparse tensor is uncoalesced, its transpose should be the same
@@ -407,6 +416,7 @@ def test_t_empty(self):
         self.assertEqual(x._sparseDims(), 2)
         self.assertEqual(x._denseDims(), 0)

+    @skipIfRocm
     def test_add_zeros(self):
         def test_shape(sparse_dims, sizes):
             x, _, _ = self._gen_sparse(sparse_dims, 20, sizes)
@@ -470,6 +480,7 @@ def test_shape(di, dj, dk):
         test_shape(1000, 100, 100)
         test_shape(3000, 64, 300)

+    @skipIfRocm
     def test_dsmm(self):
         def test_shape(di, dj, dk):
             x = self._gen_sparse(2, 20, [di, dj])[0]
@@ -483,6 +494,7 @@ def test_shape(di, dj, dk):
         test_shape(1000, 100, 100)
         test_shape(3000, 64, 300)

+    @skipIfRocm
     def test_hsmm(self):
         def test_shape(di, dj, dk):
             x = self._gen_sparse(2, 20, [di, dj])[0]
@@ -543,18 +555,21 @@ def _test_spadd_shape(self, shape_i, shape_v=None):
         expected = y + r * self.safeToDense(x_)
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_spadd(self):
         self._test_spadd_shape([5, 6])
         self._test_spadd_shape([10, 10, 10])
         self._test_spadd_shape([50, 30, 20])
         self._test_spadd_shape([5, 5, 5, 5, 5, 5])

+    @skipIfRocm
     def test_spadd_hybrid(self):
         self._test_spadd_shape([5, 6], [2, 3])
         self._test_spadd_shape([10, 10, 10], [3])
         self._test_spadd_shape([50, 30, 20], [2])
         self._test_spadd_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_norm(self):
         x, _, _ = self._gen_sparse(3, 10, 100)
         y = x.coalesce()
@@ -623,18 +638,21 @@ def _test_basic_ops_shape(self, shape_i, shape_v=None):
         y._values().add_(1)
         self.assertEqual(z._values() + 1, y._values())

+    @skipIfRocm
     def test_basic_ops(self):
         self._test_basic_ops_shape([5, 6])
         self._test_basic_ops_shape([10, 10, 10])
         self._test_basic_ops_shape([50, 30, 20])
         self._test_basic_ops_shape([5, 5, 5, 5, 5, 5])

+    @skipIfRocm
     def test_basic_ops_hybrid(self):
         self._test_basic_ops_shape([5, 6], [2, 3])
         self._test_basic_ops_shape([10, 10, 10], [3])
         self._test_basic_ops_shape([50, 30, 20], [2])
         self._test_basic_ops_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_add_dense_sparse_mismatch(self):
         x = torch.zeros([3, 4], dtype=self.value_dtype, device=self.device)
         sparse_y = self.SparseTensor(torch.zeros(1, 4, dtype=torch.int64, device=self.device),
@@ -673,6 +691,7 @@ def _test_sparse_mask_fixed(self):
         expected = self.SparseTensor(i, exp_v, torch.Size([5, 4]))
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_sparse_mask(self):
         self._test_sparse_mask_fixed()

@@ -692,6 +711,7 @@ def _test_zeros(self, shape, out_shape_i, out_shape_v=None):
         self.assertEqual(out._sparseDims(), len(shape))
         self.assertEqual(out._denseDims(), 0)

+    @skipIfRocm
     def test_log1p(self):
         if self.is_cuda:
             input = torch.cuda.sparse.DoubleTensor(
@@ -775,6 +795,7 @@ def _test_sparse_mask_hybrid_fixed(self):
         expected = self.SparseTensor(i, exp_v, torch.Size([5, 4, 2]))
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_sparse_variable_methods(self):
         # TODO: delete when tensor/variable are merged
         from torch.autograd import Variable
@@ -870,6 +891,7 @@ def test_sparse_variable_methods(self):
                 self.assertEqual(test_fn(sp_var, de_var).data,
                                  test_fn(sp_mat, de_mat), test_name)

+    @skipIfRocm
     def test_sparse_mask_hybrid(self):
         self._test_sparse_mask_hybrid_fixed()

@@ -878,6 +900,7 @@ def test_sparse_mask_hybrid(self):
         self._test_sparse_mask_shape([50, 30, 20], [2])
         self._test_sparse_mask_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_sparse_add_coalesce(self):
         i = self.IndexTensor([[1, 2, 1]])
         v = self.ValueTensor([3, 4, 5])
@@ -932,6 +955,7 @@ def test_new_device_multi_gpu(self):
         self._test_new_device((30, 20), 1)
         self._test_new_device((30, 20, 10), 1)

+    @skipIfRocm
     def test_new(self):
         x, indices, values = self._gen_sparse(3, 10, 100)
         if not x.is_cuda:
@@ -1062,6 +1086,7 @@ def test_is_sparse(self):
         x = self.SparseTensor()
         self.assertTrue(x.is_sparse)

+    @skipIfRocm
     def test_resize_as(self):
         def do_test(t):
             y = t.new().resize_as_(t).zero_()
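Each @skipIfRocm decorator keeps one still-failing test out of the ROCm run while the rest of test_sparse.py now executes in CI. The decorator name and the import from common come from the diff above; its body is not shown in this commit, so the following is only a minimal sketch of what such a helper might look like, assuming a TEST_WITH_ROCM flag derived from the environment:

    # Minimal sketch of a skipIfRocm decorator; the flag name and message
    # are assumptions, only the decorator's name comes from the diff.
    import os
    import unittest
    from functools import wraps

    TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        # Skip the wrapped test on ROCm builds; run it unchanged elsewhere.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                raise unittest.SkipTest("test doesn't currently work on ROCm")
            return fn(*args, **kwargs)
        return wrapper

As fixes land in the ROCm stack, individual decorators can be deleted one test at a time without touching run_test.py again.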
