diff --git a/.lintrunner.toml b/.lintrunner.toml
index 50eb09984fec7..2f2f0db17768d 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1146,19 +1146,6 @@ exclude_patterns = [
     'test/test_vulkan.py',
     'test/test_xnnpack_integration.py',
     'test/torch_np/numpy_test/**/*.py',
-    'test/typing/fail/bitwise_ops.py',
-    'test/typing/fail/creation_ops.py',
-    'test/typing/fail/random.py',
-    'test/typing/pass/creation_ops.py',
-    'test/typing/pass/math_ops.py',
-    'test/typing/reveal/module_list.py',
-    'test/typing/reveal/namedtuple.py',
-    'test/typing/reveal/opt_size.py',
-    'test/typing/reveal/size.py',
-    'test/typing/reveal/tensor_constructors.py',
-    'test/typing/reveal/tensor_copy.py',
-    'test/typing/reveal/tensor_sampling.py',
-    'test/typing/reveal/torch_optim.py',
     'torch/_awaits/__init__.py',
     'torch/_custom_op/__init__.py',
     'torch/_custom_op/autograd.py',
diff --git a/test/typing/fail/creation_ops.py b/test/typing/fail/creation_ops.py
index 2ebb9b4cc266b..c8e370143beae 100644
--- a/test/typing/fail/creation_ops.py
+++ b/test/typing/fail/creation_ops.py
@@ -1,6 +1,13 @@
 # flake8: noqa
 import torch
 
-torch.tensor([3], dtype='int32')  # E: Argument "dtype" to "tensor" has incompatible type "str"; expected "dtype | None"  [arg-type]
-torch.ones(3, dtype='int32')  # E: No overload variant of "ones" matches argument types "int", "str"
-torch.zeros(3, dtype='int32')  # E: No overload variant of "zeros" matches argument types "int", "str"
+torch.tensor(
+    [3],
+    dtype="int32",  # E: Argument "dtype" to "tensor" has incompatible type "str"; expected "dtype | None"  [arg-type]
+)
+torch.ones(  # E: No overload variant of "ones" matches argument types "int", "str"
+    3, dtype="int32"
+)
+torch.zeros(  # E: No overload variant of "zeros" matches argument types "int", "str"
+    3, dtype="int32"
+)
diff --git a/test/typing/fail/random.py b/test/typing/fail/random.py
index e87ec7f05a2ea..46f2582bfd3dd 100644
--- a/test/typing/fail/random.py
+++ b/test/typing/fail/random.py
@@ -1,4 +1,10 @@
 # flake8: noqa
 import torch
 
-torch.set_rng_state([1, 2, 3])  # E: Argument 1 to "set_rng_state" has incompatible type "list[int]"; expected "Tensor"  [arg-type]
+torch.set_rng_state(
+    [  # E: Argument 1 to "set_rng_state" has incompatible type "list[int]"; expected "Tensor"  [arg-type]
+        1,
+        2,
+        3,
+    ]
+)
diff --git a/test/typing/pass/creation_ops.py b/test/typing/pass/creation_ops.py
index f866d3a1628fa..479b50ea78228 100644
--- a/test/typing/pass/creation_ops.py
+++ b/test/typing/pass/creation_ops.py
@@ -15,28 +15,26 @@
 # torch.tensor()
 torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
 torch.tensor([0, 1])
-torch.tensor([[0.11111, 0.222222, 0.3333333]],
-             dtype=torch.float64,
-             device=torch.device('cuda:0'))
+torch.tensor(
+    [[0.11111, 0.222222, 0.3333333]], dtype=torch.float64, device=torch.device("cuda:0")
+)
 torch.tensor(3.14159)
 
 # torch.sparse_coo_tensor
-i = torch.tensor([[0, 1, 1],
-                  [2, 0, 2]])
+i = torch.tensor([[0, 1, 1], [2, 0, 2]])
 v = torch.tensor([3, 4, 5], dtype=torch.float32)
 torch.sparse_coo_tensor(i, v, [2, 4])
 torch.sparse_coo_tensor(i, v)
-torch.sparse_coo_tensor(i, v, [2, 4],
-                        dtype=torch.float64,
-                        device=torch.device('cuda:0'))
+torch.sparse_coo_tensor(
+    i, v, [2, 4], dtype=torch.float64, device=torch.device("cuda:0")
+)
 torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
-torch.sparse_coo_tensor(torch.empty([1, 0]),
-                        torch.empty([0, 2]), [1, 2])
+torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
 
 # torch.as_tensor
 a = [1, 2, 3]
 torch.as_tensor(a)
-torch.as_tensor(a, device=torch.device('cuda'))
+torch.as_tensor(a, device=torch.device("cuda"))
 
 # torch.as_strided
 x = torch.randn(3, 3)
@@ -101,7 +99,9 @@
 
 # torch.quantize_per_channel
 x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
-quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
+quant = torch.quantize_per_channel(
+    x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8
+)
 
 # torch.dequantize
 torch.dequantize(x)
diff --git a/test/typing/pass/math_ops.py b/test/typing/pass/math_ops.py
index 6b75c719d7cc2..7b347018b38e9 100644
--- a/test/typing/pass/math_ops.py
+++ b/test/typing/pass/math_ops.py
@@ -1,7 +1,8 @@
 # flake8: noqa
-import torch
 import math
 
+import torch
+
 a = torch.randn(4)
 b = torch.randn(4)
 t = torch.tensor([-1, -2, 3], dtype=torch.int8)
@@ -20,8 +21,8 @@
 # add
 torch.add(a, 20)
 torch.add(a, torch.randn(4, 1), alpha=10)
-torch.add(a+1j, 20+1j)
-torch.add(a+1j, 20, alpha=1j)
+torch.add(a + 1j, 20 + 1j)
+torch.add(a + 1j, 20, alpha=1j)
 
 # addcdiv
 torch.addcdiv(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
@@ -30,7 +31,7 @@
 torch.addcmul(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
 
 # angle
-torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
+torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) * 180 / 3.14159
 
 # asin/arcsin
 torch.asin(a)
@@ -91,37 +92,41 @@
 torch.deg2rad(torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]))
 
 # div/divide/true_divide
-x = torch.tensor([ 0.3810,  1.2774, -0.2972, -0.3719,  0.4637])
+x = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
 torch.div(x, 0.5)
-p = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
-                  [ 0.1815, -1.0111,  0.9805, -1.5923],
-                  [ 0.1062,  1.4581,  0.7759, -1.2344],
-                  [-0.1830, -0.0313,  1.1908, -1.4757]])
-q = torch.tensor([ 0.8032,  0.2930, -0.8113, -0.2308])
+p = torch.tensor(
+    [
+        [-0.3711, -1.9353, -0.4605, -0.2917],
+        [0.1815, -1.0111, 0.9805, -1.5923],
+        [0.1062, 1.4581, 0.7759, -1.2344],
+        [-0.1830, -0.0313, 1.1908, -1.4757],
+    ]
+)
+q = torch.tensor([0.8032, 0.2930, -0.8113, -0.2308])
 torch.div(p, q)
-torch.divide(p, q, rounding_mode='trunc')
-torch.divide(p, q, rounding_mode='floor')
+torch.divide(p, q, rounding_mode="trunc")
+torch.divide(p, q, rounding_mode="floor")
 
 # digamma
 torch.digamma(torch.tensor([1, 0.5]))
 
 # erf
-torch.erf(torch.tensor([0, -1., 10.]))
+torch.erf(torch.tensor([0, -1.0, 10.0]))
 
 # erfc
-torch.erfc(torch.tensor([0, -1., 10.]))
+torch.erfc(torch.tensor([0, -1.0, 10.0]))
 
 # erfinv
-torch.erfinv(torch.tensor([0, 0.5, -1.]))
+torch.erfinv(torch.tensor([0, 0.5, -1.0]))
 
 # exp
-torch.exp(torch.tensor([0, math.log(2.)]))
+torch.exp(torch.tensor([0, math.log(2.0)]))
 
 # exp2
-torch.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
+torch.exp2(torch.tensor([0, math.log2(2.0), 3, 4]))
 
 # expm1
-torch.expm1(torch.tensor([0, math.log(2.)]))
+torch.expm1(torch.tensor([0, math.log(2.0)]))
 
 # fake_quantize_per_channel_affine
 x = torch.randn(2, 2, 2)
@@ -140,11 +145,11 @@
 torch.floor(a)
 
 # floor_divide
-torch.floor_divide(torch.tensor([4., 3.]), torch.tensor([2., 2.]))
-torch.floor_divide(torch.tensor([4., 3.]), 1.4)
+torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0]))
+torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4)
 
 # fmod
-torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+torch.fmod(torch.tensor([-3.0, -2, -1, 1, 2, 3]), 2)
 torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5)
 
 # frac
@@ -154,11 +159,11 @@
 torch.randn(4, dtype=torch.cfloat).imag
 
 # ldexp
-torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
+torch.ldexp(torch.tensor([1.0]), torch.tensor([1]))
 torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
 
 # lerp
-start = torch.arange(1., 5.)
+start = torch.arange(1.0, 5.0)
 end = torch.empty(4).fill_(10)
 torch.lerp(start, end, 0.5)
 torch.lerp(start, end, torch.full_like(start, 0.5))
@@ -200,8 +205,11 @@
 # logical_not
 torch.logical_not(torch.tensor([True, False]))
 torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
-torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
-torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
+torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double))
+torch.logical_not(
+    torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
+    out=torch.empty(3, dtype=torch.int16),
+)
 
 # logical_or
 torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
@@ -235,13 +243,13 @@
 # mul/multiply
 torch.mul(torch.randn(3), 100)
 torch.multiply(torch.randn(4, 1), torch.randn(1, 4))
-torch.mul(torch.randn(3)+1j, 100+1j)
+torch.mul(torch.randn(3) + 1j, 100 + 1j)
 
 # mvlgamma
 torch.mvlgamma(torch.empty(2, 3).uniform_(1, 2), 2)
 
 # nan_to_num
-w = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
+w = torch.tensor([float("nan"), float("inf"), -float("inf"), 3.14])
 torch.nan_to_num(x)
 torch.nan_to_num(x, nan=2.0)
 torch.nan_to_num(x, nan=2.0, posinf=1.0)
@@ -251,7 +259,9 @@
 
 # nextafter
 eps = torch.finfo(torch.float32).eps
-torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps])
+torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor(
+    [eps + 1, 2 - eps]
+)
 
 # polygamma
 torch.polygamma(1, torch.tensor([1, 0.5]))
@@ -261,7 +271,7 @@
 
 # pow
 torch.pow(a, 2)
-torch.pow(torch.arange(1., 5.), torch.arange(1., 5.))
+torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0))
 
 # rad2deg
 torch.rad2deg(torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]))
@@ -273,7 +283,7 @@
 torch.reciprocal(a)
 
 # remainder
-torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+torch.remainder(torch.tensor([-3.0, -2, -1, 1, 2, 3]), 2)
 torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5)
 
 # round
@@ -286,13 +296,13 @@
 torch.sigmoid(a)
 
 # sign
-torch.sign(torch.tensor([0.7, -1.2, 0., 2.3]))
+torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3]))
 
 # sgn
-torch.tensor([3+4j, 7-24j, 0, 1+2j]).sgn()
+torch.tensor([3 + 4j, 7 - 24j, 0, 1 + 2j]).sgn()
 
 # signbit
-torch.signbit(torch.tensor([0.7, -1.2, 0., 2.3]))
+torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3]))
 
 # sin
 torch.sin(a)
@@ -324,8 +334,10 @@
 torch.trunc(a)
 
 # xlogy
-f = torch.zeros(5,)
-g = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
+f = torch.zeros(
+    5,
+)
+g = torch.tensor([-1, 0, 1, float("inf"), float("nan")])
 torch.xlogy(f, g)
 
 f = torch.tensor([1, 2, 3])
diff --git a/test/typing/reveal/module_list.py b/test/typing/reveal/module_list.py
index 8eda43e36480d..354835f45ab2c 100644
--- a/test/typing/reveal/module_list.py
+++ b/test/typing/reveal/module_list.py
@@ -1,12 +1,15 @@
 import torch
 
+
 # ModuleList with elements of type Module
 class FooModule(torch.nn.Module):
     pass
 
+
 class BarModule(torch.nn.Module):
     pass
 
+
 ml: torch.nn.ModuleList = torch.nn.ModuleList([FooModule(), BarModule()])
 ml[0].children() == []  # noqa: B015
 reveal_type(ml)  # E: {ModuleList}
diff --git a/test/typing/reveal/namedtuple.py b/test/typing/reveal/namedtuple.py
index 085270d7f9a87..8ee3465f41044 100644
--- a/test/typing/reveal/namedtuple.py
+++ b/test/typing/reveal/namedtuple.py
@@ -4,13 +4,13 @@
 t = torch.tensor([[3.0, 1.5], [2.0, 1.5]])
 
 t_sort = t.sort()
-t_sort[0][0, 0] == 1.5 # noqa: B015
-t_sort.indices[0, 0] == 1 # noqa: B015
+t_sort[0][0, 0] == 1.5  # noqa: B015
+t_sort.indices[0, 0] == 1  # noqa: B015
 t_sort.values[0, 0] == 1.5  # noqa: B015
 reveal_type(t_sort)  # E: torch.return_types.sort
 
 t_qr = torch.linalg.qr(t)
-t_qr[0].shape == [2, 2] # noqa: B015
-t_qr.Q.shape == [2, 2] # noqa: B015
+t_qr[0].shape == [2, 2]  # noqa: B015
+t_qr.Q.shape == [2, 2]  # noqa: B015
 # TODO: Fixme, should be Tuple[{Tensor}, {Tensor}, fallback=torch.return_types.qr]
 reveal_type(t_qr)  # E: Any
diff --git a/test/typing/reveal/size.py b/test/typing/reveal/size.py
index 2e145ed4f4633..631f5c9407ce3 100644
--- a/test/typing/reveal/size.py
+++ b/test/typing/reveal/size.py
@@ -1,4 +1,5 @@
 import torch
+
 input = []
 input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
 input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
diff --git a/test/typing/reveal/tensor_constructors.py b/test/typing/reveal/tensor_constructors.py
index 5f64042243613..9648f7514d18c 100644
--- a/test/typing/reveal/tensor_constructors.py
+++ b/test/typing/reveal/tensor_constructors.py
@@ -2,6 +2,7 @@
 # flake8: noqa
 import torch
 from torch.testing._internal.common_utils import TEST_NUMPY
+
 if TEST_NUMPY:
     import numpy as np
 
@@ -11,29 +12,35 @@
 # torch.tensor()
 reveal_type(torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]))  # E: {Tensor}
 reveal_type(torch.tensor([0, 1]))  # E: {Tensor}
-reveal_type(torch.tensor([[0.11111, 0.222222, 0.3333333]],
-                         dtype=torch.float64,
-                         device=torch.device('cuda:0')))  # E: {Tensor}
+reveal_type(
+    torch.tensor(
+        [[0.11111, 0.222222, 0.3333333]],
+        dtype=torch.float64,
+        device=torch.device("cuda:0"),
+    )
+)  # E: {Tensor}
 reveal_type(torch.tensor(3.14159))  # E: {Tensor}
 
 # torch.sparse_coo_tensor
-i = torch.tensor([[0, 1, 1],
-                  [2, 0, 2]])  # E: {Tensor}
+i = torch.tensor([[0, 1, 1], [2, 0, 2]])  # E: {Tensor}
 v = torch.tensor([3, 4, 5], dtype=torch.float32)  # E: {Tensor}
 reveal_type(torch.sparse_coo_tensor(i, v, [2, 4]))  # E: {Tensor}
 reveal_type(torch.sparse_coo_tensor(i, v))  # E: {Tensor}
-reveal_type(torch.sparse_coo_tensor(i, v, [2, 4],
-                                    dtype=torch.float64,
-                                    device=torch.device('cuda:0')))  # E: {Tensor}
+reveal_type(
+    torch.sparse_coo_tensor(
+        i, v, [2, 4], dtype=torch.float64, device=torch.device("cuda:0")
+    )
+)  # E: {Tensor}
 reveal_type(torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]))  # E: {Tensor}
-reveal_type(torch.sparse_coo_tensor(torch.empty([1, 0]),
-                                    torch.empty([0, 2]), [1, 2]))  # E: {Tensor}
+reveal_type(
+    torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
+)  # E: {Tensor}
 
 # torch.as_tensor
 if TEST_NUMPY:
     a = np.array([1, 2, 3])
     reveal_type(torch.as_tensor(a))  # E: {Tensor}
-    reveal_type(torch.as_tensor(a, device=torch.device('cuda')))  # E: {Tensor}
+    reveal_type(torch.as_tensor(a, device=torch.device("cuda")))  # E: {Tensor}
 
 # torch.as_strided
 x = torch.randn(3, 3)
@@ -89,11 +96,17 @@
 reveal_type(torch.full_like(torch.full((2, 3), 3.141592), 2.71828))  # E: {Tensor}
 
 # torch.quantize_per_tensor
-reveal_type(torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8))  # E: {Tensor}
+reveal_type(
+    torch.quantize_per_tensor(
+        torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8
+    )
+)  # E: {Tensor}
 
 # torch.quantize_per_channel
 x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
-quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
+quant = torch.quantize_per_channel(
+    x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8
+)
 reveal_type(x)  # E: {Tensor}
 
 # torch.dequantize
diff --git a/test/typing/reveal/torch_optim.py b/test/typing/reveal/torch_optim.py
index 4f7a7e6518166..edff954ae9a97 100644
--- a/test/typing/reveal/torch_optim.py
+++ b/test/typing/reveal/torch_optim.py
@@ -4,6 +4,7 @@
 def foo(opt: torch.optim.Optimizer) -> None:
     opt.zero_grad()
 
+
 opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
 reveal_type(opt_adagrad)  # E: {Adagrad}
 foo(opt_adagrad)