Skip to content

Commit 9d6597b

Browse files
cyyever authored and pytorchmergebot committed
Correctly use test parameters (#166726)
This PR makes previously unused test parameters actually be used in some tests. Pull Request resolved: #166726 Approved by: https://github.com/rec, https://github.com/albanD, https://github.com/Skylion007
1 parent e8fadba commit 9d6597b

File tree

4 files changed

+6
-7
lines changed

4 files changed

+6
-7
lines changed

test/distributed/fsdp/test_fsdp_misc.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -514,18 +514,17 @@ def test_fsdp_optimizer_overlap(self):
514514
def test_fsdp_cpu_training(self):
515515
"""Tests FSDP training on CPU."""
516516
gloo_pg = dist.new_group(backend="gloo")
517-
for ss in [ # noqa: F841
517+
for ss in [
518518
ShardingStrategy.NO_SHARD,
519519
ShardingStrategy.FULL_SHARD,
520520
ShardingStrategy.SHARD_GRAD_OP,
521-
ShardingStrategy.HYBRID_SHARD,
522-
ShardingStrategy._HYBRID_SHARD_ZERO2,
523521
]:
524522
torch.manual_seed(42)
525523
model = MyModel()
526524
ref_model = DDP(deepcopy(model), process_group=gloo_pg)
527525
model = FSDP(
528526
model,
527+
sharding_strategy=ss,
529528
auto_wrap_policy=always_wrap_policy,
530529
process_group=gloo_pg,
531530
device_id=torch.device("cpu"),

test/quantization/core/test_quantized_tensor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1062,8 +1062,8 @@ def _test_qtensor_masked_fill(self, device):
10621062
mask = torch.randint(0, 2, (numel, ), device=device)
10631063
mask = mask.bool()
10641064
x = torch.rand(numel, device=device)
1065-
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)
10661065
for qtype, fill_with in itertools.product(types, fills):
1066+
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)
10671067
q_masked_fill = qx.clone()
10681068
q_masked_fill.masked_fill_(mask, fill_with)
10691069
ref = qx.clone()

test/test_spectral_ops.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -481,7 +481,7 @@ def test_fftn_noop_transform(self, device, dtype):
481481
torch.fft.ifft2,
482482
]:
483483
inp = make_tensor((10, 10), device=device, dtype=dtype)
484-
out = torch.fft.fftn(inp, dim=[])
484+
out = op(inp, dim=[])
485485

486486
expect_dtype = RESULT_TYPE.get(inp.dtype, inp.dtype)
487487
expect = inp.to(expect_dtype)

test/test_torch.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2263,8 +2263,8 @@ def check(t, correction=1, fweights=None, aweights=None):
22632263
if num_observations > 0:
22642264
fweights = torch.randint(1, 10, (num_observations,), device=device)
22652265
aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=1)
2266-
for correction, _fw, _aw in product([0, 1, 2], [None, fweights], [None, aweights]):
2267-
check(x, correction, fweights, aweights)
2266+
for correction, fw, aw in product([0, 1, 2], [None, fweights], [None, aweights]):
2267+
check(x, correction, fw, aw)
22682268

22692269
@skipIfNoSciPy
22702270
@dtypes(*floating_types_and(torch.half, torch.bfloat16))

0 commit comments

Comments (0)