From bd1f4f84fe5f15f5a781badbf544fa2f0c4ebc94 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Wed, 7 Oct 2020 04:04:49 -0500
Subject: [PATCH 1/5] Fixes #45113 - Fixed bug in sspaddmm by calling contiguous on indices

---
 .../ATen/native/sparse/SparseTensorMath.cpp |  3 +-
 test/test_sparse.py                         | 40 +++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/aten/src/ATen/native/sparse/SparseTensorMath.cpp b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
index 2bb5842b4726..6a3441a401df 100644
--- a/aten/src/ATen/native/sparse/SparseTensorMath.cpp
+++ b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
@@ -1084,7 +1084,8 @@ SparseTensor& _sspaddmm_out_cpu(
       "sspaddmm: Argument #1: Expected dim 1 size ", dim_k, ", got ", t.size(1));
 
   int64_t nnz = sparse._nnz();
-  LongTensor indices = sparse._indices();
+  // We have to make indices contiguous as we use indices.data_ptr in _to_csr, which assumes row-contiguous storage
+  LongTensor indices = sparse._indices().contiguous();
   Tensor values = sparse._values();
 
   LongTensor csr = _to_csr(indices.data_ptr<int64_t>(), dim_i, nnz);

diff --git a/test/test_sparse.py b/test/test_sparse.py
index 2a0e76afe36a..3910fed12104 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1080,6 +1080,46 @@ def test_shape(di, dj, dk, nnz):
         test_shape(1000, 0, 100, 0)
         test_shape(1000, 100, 0, 0)
 
+    @cpu_only
+    def test_sspaddmm(self):
+
+        def test_shape(di, dj, dk, nnz):
+            x = self._gen_sparse(2, nnz, [di, dj])[0]
+            t = self._gen_sparse(2, nnz, [di, dk])[0]
+            y = torch.randn(dj, dk)
+            alpha = random.random()
+            beta = random.random()
+
+            res = t.sspaddmm(x, y, beta=beta, alpha=alpha)
+            expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
+            self.assertEqual(self.safeToDense(res), expected)
+
+            res = t.sspaddmm(x, y)
+            expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
+            self.assertEqual(self.safeToDense(res), expected)
+
+        test_shape(7, 5, 3, 20)
+        test_shape(1000, 100, 100, 20)
+        test_shape(3000, 64, 300, 20)
+        test_shape(0, 100, 100, 0)
+        test_shape(1000, 0, 100, 0)
+        test_shape(1000, 100, 0, 0)
+
+        # Test issue https://github.com/pytorch/pytorch/issues/45113
+        batch_size, input_size, hidden_size = 5, 3, 7
+        weight = torch.randn(hidden_size, input_size).to_sparse()
+        bias = torch.randn((hidden_size, 1)).to_sparse()
+        bias = torch.cat([bias] * batch_size, dim=1)
+        if not self.is_uncoalesced:
+            weight = weight.coalesce()
+            bias = bias.coalesce()
+
+        x = torch.randn(batch_size, input_size)
+        y = bias.sspaddmm(weight, x.t())
+
+        y_bis = (bias.to_dense() + torch.matmul(weight.to_dense(), x.t())).to_sparse()
+        self.assertLess((y.to_dense() - y_bis.to_dense()).abs().max().item(), 1e-6)
+
     def test_sparse_addmm(self):
         def test_shape(m, n, p, nnz, broadcast):
             if broadcast:

From f7fd9436e795e43e3b9c95d9657e1af8fcc9308e Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Thu, 8 Oct 2020 03:27:14 -0500
Subject: [PATCH 2/5] Fixed tests according to review

---
 test/test_sparse.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/test/test_sparse.py b/test/test_sparse.py
index 3910fed12104..698b39a1a65a 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1103,22 +1103,25 @@ def test_shape(di, dj, dk, nnz):
         test_shape(3000, 64, 300, 20)
         test_shape(0, 100, 100, 0)
         test_shape(1000, 0, 100, 0)
-        test_shape(1000, 100, 0, 0)
+        test_shape(1000, 100, 0, 0)
 
-        # Test issue https://github.com/pytorch/pytorch/issues/45113
+        # Test code from issue https://github.com/pytorch/pytorch/issues/45113
         batch_size, input_size, hidden_size = 5, 3, 7
-        weight = torch.randn(hidden_size, input_size).to_sparse()
+        # Create uncoalesced sparse tensors:
+        weight = torch.randn(hidden_size, 1).to_sparse()
+        weight = torch.cat([weight] * input_size, dim=1)
         bias = torch.randn((hidden_size, 1)).to_sparse()
         bias = torch.cat([bias] * batch_size, dim=1)
+
         if not self.is_uncoalesced:
             weight = weight.coalesce()
             bias = bias.coalesce()
 
-        x = torch.randn(batch_size, input_size)
-        y = bias.sspaddmm(weight, x.t())
+        x = torch.randn(input_size, batch_size)
+        res = bias.sspaddmm(weight, x)
 
-        y_bis = (bias.to_dense() + torch.matmul(weight.to_dense(), x.t())).to_sparse()
-        self.assertLess((y.to_dense() - y_bis.to_dense()).abs().max().item(), 1e-6)
+        true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
+        self.assertTrue(res.to_dense().equal(true_result.to_dense()))
 
     def test_sparse_addmm(self):
         def test_shape(m, n, p, nnz, broadcast):

From ab5897d31dc87ae8b434126ebd6dbbbdb4b70a95 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Thu, 8 Oct 2020 15:13:37 -0500
Subject: [PATCH 3/5] Fixed failing tests

---
 test/test_sparse.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_sparse.py b/test/test_sparse.py
index 698b39a1a65a..77cfe85ff83e 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1121,7 +1121,7 @@ def test_shape(di, dj, dk, nnz):
         res = bias.sspaddmm(weight, x)
 
         true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
-        self.assertTrue(res.to_dense().equal(true_result.to_dense()))
+        self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
 
     def test_sparse_addmm(self):
         def test_shape(m, n, p, nnz, broadcast):

From 19a1405513fd54dd77a1c6a149b3b2bda531c1b4 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Sun, 11 Oct 2020 18:46:47 -0500
Subject: [PATCH 4/5] Updated test according to review

---
 test/test_sparse.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/test/test_sparse.py b/test/test_sparse.py
index 77cfe85ff83e..ba7b5d527a28 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1107,14 +1107,16 @@ def test_shape(di, dj, dk, nnz):
 
         # Test code from issue https://github.com/pytorch/pytorch/issues/45113
         batch_size, input_size, hidden_size = 5, 3, 7
-        # Create uncoalesced sparse tensors:
-        weight = torch.randn(hidden_size, 1).to_sparse()
-        weight = torch.cat([weight] * input_size, dim=1)
+        
+        # Create coalesced sparse tensor as in the issue
+        weight = torch.randn(hidden_size, input_size).to_sparse()
+        self.assertTrue(weight.is_coalesced())
+        self.assertFalse(weight._indices().is_contiguous())
+        # Create un/coalesced sparse tensor
         bias = torch.randn((hidden_size, 1)).to_sparse()
         bias = torch.cat([bias] * batch_size, dim=1)
 
         if not self.is_uncoalesced:
-            weight = weight.coalesce()
             bias = bias.coalesce()
 
         x = torch.randn(input_size, batch_size)

From ba5fdfc0986830e0216fa2218a7bbc9ff2193d7b Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Mon, 12 Oct 2020 03:14:46 -0500
Subject: [PATCH 5/5] Fixed flake8

---
 test/test_sparse.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_sparse.py b/test/test_sparse.py
index ba7b5d527a28..e365c94aae99 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1107,7 +1107,7 @@ def test_shape(di, dj, dk, nnz):
 
         # Test code from issue https://github.com/pytorch/pytorch/issues/45113
        batch_size, input_size, hidden_size = 5, 3, 7
-        
+
         # Create coalesced sparse tensor as in the issue
         weight = torch.randn(hidden_size, input_size).to_sparse()
         self.assertTrue(weight.is_coalesced())
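
Background note, not part of the patch series: the sketch below is a minimal standalone Python repro of the scenario the new test covers. The sizes (5, 3, 7) come from issue #45113, the variable names mirror the test, and the final allclose check is illustrative. to_sparse() on a dense matrix returns a coalesced sparse tensor whose _indices() need not be contiguous, while _to_csr reads those indices through a raw data pointer that assumes row-contiguous storage; the added contiguous() call in _sspaddmm_out_cpu guarantees that layout.

    import torch

    batch_size, input_size, hidden_size = 5, 3, 7

    # to_sparse() yields a coalesced tensor, but its indices may be non-contiguous
    weight = torch.randn(hidden_size, input_size).to_sparse()
    assert weight.is_coalesced()

    # Sparse (hidden_size, batch_size) bias, built as in the test
    bias = torch.cat([torch.randn(hidden_size, 1).to_sparse()] * batch_size, dim=1)
    x = torch.randn(input_size, batch_size)

    # Sparse result of bias + weight @ x; with the fix it matches the dense computation
    res = bias.sspaddmm(weight, x)
    expected = bias.to_dense() + weight.to_dense() @ x
    assert torch.allclose(res.to_dense(), expected)

On a build that includes patch 1 the final assertion holds; without it, sspaddmm can silently return values that differ from the dense reference, which is the behaviour reported in the issue.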