From 626080d306a8b7c39f09b9350888fda3b98987bf Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 25 Nov 2022 06:18:36 +0000 Subject: [PATCH 1/6] set version --- .github/workflows/building.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/building.yml b/.github/workflows/building.yml index c69ff4bc..81173f26 100644 --- a/.github/workflows/building.yml +++ b/.github/workflows/building.yml @@ -45,7 +45,6 @@ jobs: - name: Upgrade pip run: | pip install --upgrade setuptools - pip list - name: Free up disk space if: ${{ runner.os == 'Linux' }} @@ -67,7 +66,11 @@ jobs: if: ${{ runner.os != 'macOS' }} run: | VERSION=`sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_sparse/__init__.py` - sed -i "s/$VERSION/$VERSION+${{ matrix.cuda-version }}/" torch_sparse/__init__.py + TORCH_VERSION=`echo "pt${{ matrix.torch-version }}" | sed "s/..$//" | sed "s/\.//g"` + CUDA_VERSION=`echo ${{ matrix.cuda-version }}` + echo "New version name: $VERSION+$TORCH_VERSION$CUDA_VERSION" + sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" setup.py + sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" torch_sparse/__init__.py shell: bash From a9daac3f87197124cf8bffcaf3fb0a727b9973c7 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 25 Nov 2022 06:31:00 +0000 Subject: [PATCH 2/6] update --- torch_sparse/cat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_sparse/cat.py b/torch_sparse/cat.py index d65164ee..77c8892a 100644 --- a/torch_sparse/cat.py +++ b/torch_sparse/cat.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Tuple +from typing import Optional, List, Tuple # noqa import torch from torch_sparse.storage import SparseStorage From 432358c4bc6f0a1f4f0ad88227a4e8de51fda698 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 25 Nov 2022 08:34:26 +0000 Subject: [PATCH 3/6] update --- .github/workflows/building.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/building.yml b/.github/workflows/building.yml index 81173f26..f4ae4611 100644 --- a/.github/workflows/building.yml +++ b/.github/workflows/building.yml @@ -10,10 +10,13 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-18.04, macos-10.15, windows-2019] + # os: [ubuntu-18.04, macos-10.15, windows-2019] + os: [ubuntu-18.04] python-version: ['3.7', '3.8', '3.9', '3.10'] - torch-version: [1.12.0, 1.13.0] - cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117'] + # torch-version: [1.12.0, 1.13.0] + torch-version: [1.12.0] + cuda-version: ['cu113'] + # cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117'] exclude: - torch-version: 1.12.0 cuda-version: 'cu117' @@ -103,9 +106,9 @@ jobs: shell: bash - - name: Test installation - run: | - python -c "import torch_sparse; print('torch-sparse:', torch_sparse.__version__)" + # - name: Test installation + # run: | + # python -c "import torch_sparse; print('torch-sparse:', torch_sparse.__version__)" - name: Build wheel run: | From e06af9a0a731660e95c414bd8592ec9a556db898 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 25 Nov 2022 08:35:08 +0000 Subject: [PATCH 4/6] update --- .github/workflows/building.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/building.yml b/.github/workflows/building.yml index f4ae4611..81173f26 100644 --- a/.github/workflows/building.yml +++ b/.github/workflows/building.yml @@ -10,13 +10,10 @@ jobs: strategy: fail-fast: false matrix: - # os: [ubuntu-18.04, macos-10.15, windows-2019] - os: 
[ubuntu-18.04] + os: [ubuntu-18.04, macos-10.15, windows-2019] python-version: ['3.7', '3.8', '3.9', '3.10'] - # torch-version: [1.12.0, 1.13.0] - torch-version: [1.12.0] - cuda-version: ['cu113'] - # cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117'] + torch-version: [1.12.0, 1.13.0] + cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117'] exclude: - torch-version: 1.12.0 cuda-version: 'cu117' @@ -106,9 +103,9 @@ jobs: shell: bash - # - name: Test installation - # run: | - # python -c "import torch_sparse; print('torch-sparse:', torch_sparse.__version__)" + - name: Test installation + run: | + python -c "import torch_sparse; print('torch-sparse:', torch_sparse.__version__)" - name: Build wheel run: | From 98c091c3a195af9cc9cf5a348b2feb15dcb8f83a Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 25 Nov 2022 09:13:10 +0000 Subject: [PATCH 5/6] fix test --- test/test_matmul.py | 18 +++++++++++++++--- test/test_spspmm.py | 11 +++++++++-- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/test/test_matmul.py b/test/test_matmul.py index e2fab21e..3ec14356 100644 --- a/test/test_matmul.py +++ b/test/test_matmul.py @@ -3,6 +3,7 @@ import pytest import torch import torch_scatter + from torch_sparse.matmul import matmul from torch_sparse.tensor import SparseTensor @@ -12,6 +13,9 @@ @pytest.mark.parametrize('dtype,device,reduce', product(grad_dtypes, devices, reductions)) def test_spmm(dtype, device, reduce): + if device == torch.device('cuda:0') and dtype == torch.bfloat16: + return # Not yet implemented. + src = torch.randn((10, 8), dtype=dtype, device=device) src[2:4, :] = 0 # Remove multiple rows. src[:, 2:4] = 0 # Remove multiple columns. @@ -39,13 +43,21 @@ def test_spmm(dtype, device, reduce): out = matmul(src, other, reduce) out.backward(grad_out) - assert torch.allclose(expected, out, atol=1e-2) - assert torch.allclose(expected_grad_value, value.grad, atol=1e-2) - assert torch.allclose(expected_grad_other, other.grad, atol=1e-2) + if dtype == torch.float16 or dtype == torch.bfloat16: + assert torch.allclose(expected, out, atol=1e-1) + assert torch.allclose(expected_grad_value, value.grad, atol=1e-1) + assert torch.allclose(expected_grad_other, other.grad, atol=1e-1) + else: + assert torch.allclose(expected, out) + assert torch.allclose(expected_grad_value, value.grad) + assert torch.allclose(expected_grad_other, other.grad) @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) def test_spspmm(dtype, device): + if device == torch.device('cuda:0') and dtype == torch.bfloat16: + return # Not yet implemented. + src = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtype, device=device) diff --git a/test/test_spspmm.py b/test/test_spspmm.py index 3d1e2f36..95647b84 100644 --- a/test/test_spspmm.py +++ b/test/test_spspmm.py @@ -2,13 +2,17 @@ import pytest import torch -from torch_sparse import spspmm, SparseTensor -from .utils import grad_dtypes, devices, tensor +from torch_sparse import SparseTensor, spspmm + +from .utils import devices, grad_dtypes, tensor @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) def test_spspmm(dtype, device): + if device == torch.device('cuda:0') and dtype == torch.bfloat16: + return # Not yet implemented. 
+ indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device) valueA = tensor([1, 2, 3, 4, 5], dtype, device) indexB = torch.tensor([[0, 2], [1, 0]], device=device) @@ -21,6 +25,9 @@ def test_spspmm(dtype, device): @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) def test_sparse_tensor_spspmm(dtype, device): + if device == torch.device('cuda:0') and dtype == torch.bfloat16: + return # Not yet implemented. + x = SparseTensor( row=torch.tensor( [0, 1, 1, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9], From 09db5a9e55ea8ba18915078a1989e4e799105244 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 29 Nov 2022 08:51:20 +0000 Subject: [PATCH 6/6] fix test --- test/__init__.py | 0 test/test_add.py | 4 ++-- test/test_cat.py | 6 +++--- test/test_diag.py | 4 ++-- test/test_eye.py | 4 ++-- test/test_matmul.py | 3 +-- test/test_metis.py | 4 ++-- test/test_permute.py | 4 ++-- test/test_spmm.py | 4 ++-- test/test_spspmm.py | 3 +-- test/test_storage.py | 4 ++-- test/test_tensor.py | 8 ++++---- test/test_transpose.py | 4 ++-- test/utils.py => torch_sparse/testing.py | 6 ++++-- 14 files changed, 29 insertions(+), 29 deletions(-) delete mode 100644 test/__init__.py rename test/utils.py => torch_sparse/testing.py (80%) diff --git a/test/__init__.py b/test/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_add.py b/test/test_add.py index e4839220..0fc3971b 100644 --- a/test/test_add.py +++ b/test/test_add.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse import SparseTensor, add -from .utils import dtypes, devices, tensor +from torch_sparse import SparseTensor, add +from torch_sparse.testing import devices, dtypes, tensor @pytest.mark.parametrize('dtype,device', product(dtypes, devices)) diff --git a/test/test_cat.py b/test/test_cat.py index be1e1e53..1b07799f 100644 --- a/test/test_cat.py +++ b/test/test_cat.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse.tensor import SparseTensor -from torch_sparse.cat import cat -from .utils import devices, tensor +from torch_sparse.cat import cat +from torch_sparse.tensor import SparseTensor +from torch_sparse.testing import devices, tensor @pytest.mark.parametrize('device', devices) diff --git a/test/test_diag.py b/test/test_diag.py index 0ed7564a..a3142fce 100644 --- a/test/test_diag.py +++ b/test/test_diag.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse.tensor import SparseTensor -from .utils import dtypes, devices, tensor +from torch_sparse.tensor import SparseTensor +from torch_sparse.testing import devices, dtypes, tensor @pytest.mark.parametrize('dtype,device', product(dtypes, devices)) diff --git a/test/test_eye.py b/test/test_eye.py index 293d676f..f3f2275b 100644 --- a/test/test_eye.py +++ b/test/test_eye.py @@ -1,9 +1,9 @@ from itertools import product import pytest -from torch_sparse.tensor import SparseTensor -from .utils import dtypes, devices +from torch_sparse.tensor import SparseTensor +from torch_sparse.testing import devices, dtypes @pytest.mark.parametrize('dtype,device', product(dtypes, devices)) diff --git a/test/test_matmul.py b/test/test_matmul.py index 3ec14356..70c26a24 100644 --- a/test/test_matmul.py +++ b/test/test_matmul.py @@ -6,8 +6,7 @@ from torch_sparse.matmul import matmul from torch_sparse.tensor import SparseTensor - -from .utils import devices, grad_dtypes, reductions +from torch_sparse.testing import devices, grad_dtypes, reductions @pytest.mark.parametrize('dtype,device,reduce', diff --git a/test/test_metis.py 
b/test/test_metis.py index 897526af..a0eac32c 100644 --- a/test/test_metis.py +++ b/test/test_metis.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse.tensor import SparseTensor -from .utils import devices +from torch_sparse.tensor import SparseTensor +from torch_sparse.testing import devices try: rowptr = torch.tensor([0, 1]) diff --git a/test/test_permute.py b/test/test_permute.py index 206338f1..d8fd041f 100644 --- a/test/test_permute.py +++ b/test/test_permute.py @@ -1,8 +1,8 @@ import pytest import torch -from torch_sparse.tensor import SparseTensor -from .utils import devices, tensor +from torch_sparse.tensor import SparseTensor +from torch_sparse.testing import devices, tensor @pytest.mark.parametrize('device', devices) diff --git a/test/test_spmm.py b/test/test_spmm.py index 4d0e05c7..f0bb0a80 100644 --- a/test/test_spmm.py +++ b/test/test_spmm.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse import spmm -from .utils import dtypes, devices, tensor +from torch_sparse import spmm +from torch_sparse.testing import devices, dtypes, tensor @pytest.mark.parametrize('dtype,device', product(dtypes, devices)) diff --git a/test/test_spspmm.py b/test/test_spspmm.py index 95647b84..d6311d85 100644 --- a/test/test_spspmm.py +++ b/test/test_spspmm.py @@ -4,8 +4,7 @@ import torch from torch_sparse import SparseTensor, spspmm - -from .utils import devices, grad_dtypes, tensor +from torch_sparse.testing import devices, grad_dtypes, tensor @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) diff --git a/test/test_storage.py b/test/test_storage.py index 81bfbb49..04f62bb6 100644 --- a/test/test_storage.py +++ b/test/test_storage.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse.storage import SparseStorage -from .utils import dtypes, devices, tensor +from torch_sparse.storage import SparseStorage +from torch_sparse.testing import devices, dtypes, tensor @pytest.mark.parametrize('device', devices) diff --git a/test/test_tensor.py b/test/test_tensor.py index c83abcbc..e94f6926 100644 --- a/test/test_tensor.py +++ b/test/test_tensor.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse import SparseTensor -from .utils import grad_dtypes, devices +from torch_sparse import SparseTensor +from torch_sparse.testing import devices, grad_dtypes @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) @@ -15,8 +15,8 @@ def test_getitem(dtype, device): mat = torch.randn(m, n, dtype=dtype, device=device) mat = SparseTensor.from_dense(mat) - idx1 = torch.randint(0, m, (k,), dtype=torch.long, device=device) - idx2 = torch.randint(0, n, (k,), dtype=torch.long, device=device) + idx1 = torch.randint(0, m, (k, ), dtype=torch.long, device=device) + idx2 = torch.randint(0, n, (k, ), dtype=torch.long, device=device) bool1 = torch.zeros(m, dtype=torch.bool, device=device) bool2 = torch.zeros(n, dtype=torch.bool, device=device) bool1.scatter_(0, idx1, 1) diff --git a/test/test_transpose.py b/test/test_transpose.py index 8cf6946a..18bdcdd5 100644 --- a/test/test_transpose.py +++ b/test/test_transpose.py @@ -2,9 +2,9 @@ import pytest import torch -from torch_sparse import transpose -from .utils import dtypes, devices, tensor +from torch_sparse import transpose +from torch_sparse.testing import devices, dtypes, tensor @pytest.mark.parametrize('dtype,device', product(dtypes, devices)) diff --git a/test/utils.py b/torch_sparse/testing.py similarity index 80% rename from test/utils.py rename to torch_sparse/testing.py index 
7cc58c1d..9383ee07 100644 --- a/test/utils.py +++ b/torch_sparse/testing.py @@ -1,3 +1,5 @@ +from typing import Any + import torch import torch_scatter from packaging import version @@ -13,8 +15,8 @@ devices = [torch.device('cpu')] if torch.cuda.is_available(): - devices += [torch.device(f'cuda:{torch.cuda.current_device()}')] + devices += [torch.device('cuda:0')] -def tensor(x, dtype, device): +def tensor(x: Any, dtype: torch.dtype, device: torch.device): return None if x is None else torch.tensor(x, dtype=dtype, device=device)
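
Note (not part of the series): patch 6 replaces the tests' relative `from .utils import ...` imports with imports from the new `torch_sparse.testing` module. Below is a minimal, illustrative sketch of how a downstream test would consume the relocated helpers, assuming only the `devices`, `dtypes`, and `tensor` names shown in the diff above; the test name and body are hypothetical, not taken from the series.

    from itertools import product

    import pytest

    from torch_sparse.testing import devices, dtypes, tensor

    @pytest.mark.parametrize('dtype,device', product(dtypes, devices))
    def test_tensor_helper(dtype, device):
        # `tensor` mirrors torch.tensor(...) but passes None through unchanged.
        x = tensor([1, 2, 3], dtype, device)
        assert x.dtype == dtype and x.device == device
        assert tensor(None, dtype, device) is None

Because the helpers now live inside the installed `torch_sparse` package rather than under `test/`, such a test works without the deleted `test/__init__.py` and without any relative imports.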