Enable UFMT format on test/typing files (#126038)
Fixes some files in #123062

Run lintrunner on files:
test/typing/**/*

```
$ lintrunner -a --take UFMT --all-files
ok No lint issues.
Successfully applied all patches.
```
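For context: lintrunner drives the linters configured in .lintrunner.toml. `--take UFMT` restricts the run to the UFMT formatter (usort import sorting plus black code formatting), `-a` applies the generated patches in place, and `--all-files` checks every file the linter's include/exclude patterns select, which is why the exclude_patterns entries had to be removed first (see the .lintrunner.toml hunk below).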
Pull Request resolved: #126038
Approved by: https://github.com/shink, https://github.com/ezyang
hippocookie authored and pytorchmergebot committed May 21, 2024
1 parent 1cc9354 commit 7ee74d9
Showing 10 changed files with 111 additions and 81 deletions.
13 changes: 0 additions & 13 deletions .lintrunner.toml
```diff
@@ -1146,19 +1146,6 @@ exclude_patterns = [
     'test/test_vulkan.py',
     'test/test_xnnpack_integration.py',
     'test/torch_np/numpy_test/**/*.py',
-    'test/typing/fail/bitwise_ops.py',
-    'test/typing/fail/creation_ops.py',
-    'test/typing/fail/random.py',
-    'test/typing/pass/creation_ops.py',
-    'test/typing/pass/math_ops.py',
-    'test/typing/reveal/module_list.py',
-    'test/typing/reveal/namedtuple.py',
-    'test/typing/reveal/opt_size.py',
-    'test/typing/reveal/size.py',
-    'test/typing/reveal/tensor_constructors.py',
-    'test/typing/reveal/tensor_copy.py',
-    'test/typing/reveal/tensor_sampling.py',
-    'test/typing/reveal/torch_optim.py',
     'torch/_awaits/__init__.py',
     'torch/_custom_op/__init__.py',
     'torch/_custom_op/autograd.py',
```
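Removing these thirteen entries from the UFMT linter's exclude_patterns is what opts the test/typing files into formatting; every hunk that follows is the mechanical result of a single formatter pass.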
13 changes: 10 additions & 3 deletions test/typing/fail/creation_ops.py
```diff
@@ -1,6 +1,13 @@
 # flake8: noqa
 import torch
 
-torch.tensor([3], dtype='int32') # E: Argument "dtype" to "tensor" has incompatible type "str"; expected "dtype | None" [arg-type]
-torch.ones(3, dtype='int32') # E: No overload variant of "ones" matches argument types "int", "str"
-torch.zeros(3, dtype='int32') # E: No overload variant of "zeros" matches argument types "int", "str"
+torch.tensor(
+    [3],
+    dtype="int32",  # E: Argument "dtype" to "tensor" has incompatible type "str"; expected "dtype | None" [arg-type]
+)
+torch.ones(  # E: No overload variant of "ones" matches argument types "int", "str"
+    3, dtype="int32"
+)
+torch.zeros(  # E: No overload variant of "zeros" matches argument types "int", "str"
+    3, dtype="int32"
+)
```
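These fail/ files are negative fixtures: each `# E:` comment names the diagnostic the type checker is expected to emit for that line. As a rough sketch of how such fixtures can be verified (an illustrative harness only; PyTorch's real one is, if memory serves, test/test_typing.py), one can run mypy programmatically and match the annotations against its output:

```python
# Illustrative sketch, not PyTorch's harness: run mypy via its public API
# and assert that each "# E:" comment matches a diagnostic on that line.
import re

from mypy import api  # mypy's programmatic entry point


def expected_errors(path: str) -> dict[int, str]:
    """Collect '# E: <message>' annotations keyed by 1-based line number."""
    pattern = re.compile(r"#\s*E:\s*(.*)")
    out: dict[int, str] = {}
    with open(path) as f:
        for lineno, line in enumerate(f, start=1):
            m = pattern.search(line)
            if m:
                out[lineno] = m.group(1).strip()
    return out


def check(path: str) -> None:
    stdout, _, _ = api.run([path, "--no-error-summary"])
    reported = stdout.splitlines()
    for lineno, message in expected_errors(path).items():
        # Compare the message text, ignoring the trailing "[error-code]".
        fragment = message.split("[")[0].strip()
        assert any(
            f"{path}:{lineno}:" in diag and fragment in diag for diag in reported
        ), f"{path}:{lineno}: expected error not reported: {message}"


check("test/typing/fail/creation_ops.py")
```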
8 changes: 7 additions & 1 deletion test/typing/fail/random.py
```diff
@@ -1,4 +1,10 @@
 # flake8: noqa
 import torch
 
-torch.set_rng_state([1, 2, 3]) # E: Argument 1 to "set_rng_state" has incompatible type "list[int]"; expected "Tensor" [arg-type]
+torch.set_rng_state(
+    [  # E: Argument 1 to "set_rng_state" has incompatible type "list[int]"; expected "Tensor" [arg-type]
+        1,
+        2,
+        3,
+    ]
+)
```
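For contrast, the call that type-checks passes an actual Tensor, which is exactly what torch.get_rng_state returns:

```python
import torch

# set_rng_state expects a Tensor, such as the one get_rng_state returns.
state = torch.get_rng_state()
torch.set_rng_state(state)
```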
24 changes: 12 additions & 12 deletions test/typing/pass/creation_ops.py
```diff
@@ -15,28 +15,26 @@
 # torch.tensor()
 torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
 torch.tensor([0, 1])
-torch.tensor([[0.11111, 0.222222, 0.3333333]],
-             dtype=torch.float64,
-             device=torch.device('cuda:0'))
+torch.tensor(
+    [[0.11111, 0.222222, 0.3333333]], dtype=torch.float64, device=torch.device("cuda:0")
+)
 torch.tensor(3.14159)
 
 # torch.sparse_coo_tensor
-i = torch.tensor([[0, 1, 1],
-                  [2, 0, 2]])
+i = torch.tensor([[0, 1, 1], [2, 0, 2]])
 v = torch.tensor([3, 4, 5], dtype=torch.float32)
 torch.sparse_coo_tensor(i, v, [2, 4])
 torch.sparse_coo_tensor(i, v)
-torch.sparse_coo_tensor(i, v, [2, 4],
-                        dtype=torch.float64,
-                        device=torch.device('cuda:0'))
+torch.sparse_coo_tensor(
+    i, v, [2, 4], dtype=torch.float64, device=torch.device("cuda:0")
+)
 torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
-torch.sparse_coo_tensor(torch.empty([1, 0]),
-                        torch.empty([0, 2]), [1, 2])
+torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
 
 # torch.as_tensor
 a = [1, 2, 3]
 torch.as_tensor(a)
-torch.as_tensor(a, device=torch.device('cuda'))
+torch.as_tensor(a, device=torch.device("cuda"))
 
 # torch.as_strided
 x = torch.randn(3, 3)
@@ -101,7 +99,9 @@
 
 # torch.quantize_per_channel
 x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
-quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
+quant = torch.quantize_per_channel(
+    x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8
+)
 
 # torch.dequantize
 torch.dequantize(x)
```
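The wrapping pattern above is black's: a call whose argument list fits within the line-length limit (88 columns by default, which PyTorch appears to keep) stays on one line, while longer calls are exploded with the arguments on an indented continuation line.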
82 changes: 47 additions & 35 deletions test/typing/pass/math_ops.py
```diff
@@ -1,7 +1,8 @@
 # flake8: noqa
-import torch
 import math
 
+import torch
+
 a = torch.randn(4)
 b = torch.randn(4)
 t = torch.tensor([-1, -2, 3], dtype=torch.int8)
@@ -20,8 +21,8 @@
 # add
 torch.add(a, 20)
 torch.add(a, torch.randn(4, 1), alpha=10)
-torch.add(a+1j, 20+1j)
-torch.add(a+1j, 20, alpha=1j)
+torch.add(a + 1j, 20 + 1j)
+torch.add(a + 1j, 20, alpha=1j)
 
 # addcdiv
 torch.addcdiv(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
@@ -30,7 +31,7 @@
 torch.addcmul(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
 
 # angle
-torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
+torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) * 180 / 3.14159
 
 # asin/arcsin
 torch.asin(a)
@@ -91,37 +92,41 @@
 torch.deg2rad(torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]))
 
 # div/divide/true_divide
-x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
+x = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
 torch.div(x, 0.5)
-p = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
-                  [ 0.1815, -1.0111, 0.9805, -1.5923],
-                  [ 0.1062, 1.4581, 0.7759, -1.2344],
-                  [-0.1830, -0.0313, 1.1908, -1.4757]])
-q = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
+p = torch.tensor(
+    [
+        [-0.3711, -1.9353, -0.4605, -0.2917],
+        [0.1815, -1.0111, 0.9805, -1.5923],
+        [0.1062, 1.4581, 0.7759, -1.2344],
+        [-0.1830, -0.0313, 1.1908, -1.4757],
+    ]
+)
+q = torch.tensor([0.8032, 0.2930, -0.8113, -0.2308])
 torch.div(p, q)
-torch.divide(p, q, rounding_mode='trunc')
-torch.divide(p, q, rounding_mode='floor')
+torch.divide(p, q, rounding_mode="trunc")
+torch.divide(p, q, rounding_mode="floor")
 
 # digamma
 torch.digamma(torch.tensor([1, 0.5]))
 
 # erf
-torch.erf(torch.tensor([0, -1., 10.]))
+torch.erf(torch.tensor([0, -1.0, 10.0]))
 
 # erfc
-torch.erfc(torch.tensor([0, -1., 10.]))
+torch.erfc(torch.tensor([0, -1.0, 10.0]))
 
 # erfinv
-torch.erfinv(torch.tensor([0, 0.5, -1.]))
+torch.erfinv(torch.tensor([0, 0.5, -1.0]))
 
 # exp
-torch.exp(torch.tensor([0, math.log(2.)]))
+torch.exp(torch.tensor([0, math.log(2.0)]))
 
 # exp2
-torch.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
+torch.exp2(torch.tensor([0, math.log2(2.0), 3, 4]))
 
 # expm1
-torch.expm1(torch.tensor([0, math.log(2.)]))
+torch.expm1(torch.tensor([0, math.log(2.0)]))
 
 # fake_quantize_per_channel_affine
 x = torch.randn(2, 2, 2)
@@ -140,11 +145,11 @@
 torch.floor(a)
 
 # floor_divide
-torch.floor_divide(torch.tensor([4., 3.]), torch.tensor([2., 2.]))
-torch.floor_divide(torch.tensor([4., 3.]), 1.4)
+torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0]))
+torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4)
 
 # fmod
-torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+torch.fmod(torch.tensor([-3.0, -2, -1, 1, 2, 3]), 2)
 torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5)
 
 # frac
@@ -154,11 +159,11 @@
 torch.randn(4, dtype=torch.cfloat).imag
 
 # ldexp
-torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
+torch.ldexp(torch.tensor([1.0]), torch.tensor([1]))
 torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
 
 # lerp
-start = torch.arange(1., 5.)
+start = torch.arange(1.0, 5.0)
 end = torch.empty(4).fill_(10)
 torch.lerp(start, end, 0.5)
 torch.lerp(start, end, torch.full_like(start, 0.5))
@@ -200,8 +205,11 @@
 # logical_not
 torch.logical_not(torch.tensor([True, False]))
 torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
-torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
-torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
+torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double))
+torch.logical_not(
+    torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
+    out=torch.empty(3, dtype=torch.int16),
+)
 
 # logical_or
 torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
@@ -235,13 +243,13 @@
 # mul/multiply
 torch.mul(torch.randn(3), 100)
 torch.multiply(torch.randn(4, 1), torch.randn(1, 4))
-torch.mul(torch.randn(3)+1j, 100+1j)
+torch.mul(torch.randn(3) + 1j, 100 + 1j)
 
 # mvlgamma
 torch.mvlgamma(torch.empty(2, 3).uniform_(1, 2), 2)
 
 # nan_to_num
-w = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
+w = torch.tensor([float("nan"), float("inf"), -float("inf"), 3.14])
 torch.nan_to_num(x)
 torch.nan_to_num(x, nan=2.0)
 torch.nan_to_num(x, nan=2.0, posinf=1.0)
@@ -251,7 +259,9 @@
 
 # nextafter
 eps = torch.finfo(torch.float32).eps
-torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps])
+torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor(
+    [eps + 1, 2 - eps]
+)
 
 # polygamma
 torch.polygamma(1, torch.tensor([1, 0.5]))
@@ -261,7 +271,7 @@
 
 # pow
 torch.pow(a, 2)
-torch.pow(torch.arange(1., 5.), torch.arange(1., 5.))
+torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0))
 
 # rad2deg
 torch.rad2deg(torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]))
@@ -273,7 +283,7 @@
 torch.reciprocal(a)
 
 # remainder
-torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+torch.remainder(torch.tensor([-3.0, -2, -1, 1, 2, 3]), 2)
 torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5)
 
 # round
@@ -286,13 +296,13 @@
 torch.sigmoid(a)
 
 # sign
-torch.sign(torch.tensor([0.7, -1.2, 0., 2.3]))
+torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3]))
 
 # sgn
-torch.tensor([3+4j, 7-24j, 0, 1+2j]).sgn()
+torch.tensor([3 + 4j, 7 - 24j, 0, 1 + 2j]).sgn()
 
 # signbit
-torch.signbit(torch.tensor([0.7, -1.2, 0., 2.3]))
+torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3]))
 
 # sin
 torch.sin(a)
@@ -324,8 +334,10 @@
 torch.trunc(a)
 
 # xlogy
-f = torch.zeros(5,)
-g = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
+f = torch.zeros(
+    5,
+)
+g = torch.tensor([-1, 0, 1, float("inf"), float("nan")])
 torch.xlogy(f, g)
 
 f = torch.tensor([1, 2, 3])
```
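One detail worth calling out from the last hunk: black treats a trailing comma inside brackets as a request for one-argument-per-line layout (the "magic trailing comma"), which is why `torch.zeros(5,)` was exploded rather than joined. A minimal illustration:

```python
import torch

# With the trailing comma, black formats the call across three lines:
f = torch.zeros(
    5,
)
# Dropping the comma lets it collapse back to a single line:
f = torch.zeros(5)
```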
3 changes: 3 additions & 0 deletions test/typing/reveal/module_list.py
```diff
@@ -1,12 +1,15 @@
 import torch
 
+
 # ModuleList with elements of type Module
 class FooModule(torch.nn.Module):
     pass
 
+
 class BarModule(torch.nn.Module):
     pass
 
+
 ml: torch.nn.ModuleList = torch.nn.ModuleList([FooModule(), BarModule()])
 ml[0].children() == []  # noqa: B015
 reveal_type(ml)  # E: {ModuleList}
```
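The reveal/ fixtures work the other way around from fail/: `reveal_type` is a type-checker builtin that makes mypy report the inferred type of its argument, and the `# E:` comment holds the expected report (`{ModuleList}` appearing to be the harness's shorthand for the fully qualified torch name). Since Python 3.11 a runtime version is importable too, so a rough local experiment looks like:

```python
from typing import reveal_type  # Python 3.11+; typing_extensions otherwise

import torch

ml = torch.nn.ModuleList([torch.nn.Linear(2, 2)])
# Under mypy this line makes the checker print the inferred type;
# at runtime the typing version prints "Runtime type is 'ModuleList'".
reveal_type(ml)
```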
8 changes: 4 additions & 4 deletions test/typing/reveal/namedtuple.py
```diff
@@ -4,13 +4,13 @@
 t = torch.tensor([[3.0, 1.5], [2.0, 1.5]])
 
 t_sort = t.sort()
-t_sort[0][0, 0] == 1.5 # noqa: B015
-t_sort.indices[0, 0] == 1 # noqa: B015
+t_sort[0][0, 0] == 1.5  # noqa: B015
+t_sort.indices[0, 0] == 1  # noqa: B015
 t_sort.values[0, 0] == 1.5  # noqa: B015
 reveal_type(t_sort)  # E: torch.return_types.sort
 
 t_qr = torch.linalg.qr(t)
-t_qr[0].shape == [2, 2] # noqa: B015
-t_qr.Q.shape == [2, 2] # noqa: B015
+t_qr[0].shape == [2, 2]  # noqa: B015
+t_qr.Q.shape == [2, 2]  # noqa: B015
 # TODO: Fixme, should be Tuple[{Tensor}, {Tensor}, fallback=torch.return_types.qr]
 reveal_type(t_qr)  # E: Any
```
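What these assertions exercise: torch.sort and torch.linalg.qr return named tuples, so positional indexing and field access are interchangeable:

```python
import torch

t = torch.tensor([[3.0, 1.5], [2.0, 1.5]])
result = t.sort()
# Named-tuple fields and positional indexing refer to the same tensors.
assert torch.equal(result[0], result.values)
assert torch.equal(result[1], result.indices)
```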
1 change: 1 addition & 0 deletions test/typing/reveal/size.py
```diff
@@ -1,4 +1,5 @@
 import torch
+
 input = []
 input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
 input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
```
