
Commit

Fixing even more linting issues
jayanthd04 committed Apr 27, 2024
1 parent af8653c commit 35e1b6a
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions test/test_cuda.py
@@ -4648,8 +4648,8 @@ def test_graph_optims(self, device, dtype, optim_info):
                    "betas": (0.8, 0.7),
                    "foreach": foreach,
                    "decoupled_weight_decay": decoupled_weight_decay,
-                   "weight_decay": weight_decay
-                   }
+                   "weight_decay": weight_decay,
+               }
                for foreach, decoupled_weight_decay, weight_decay in product(
                    (False, True), (False, True), (0.0, 0.1)
                )
@@ -4723,11 +4723,11 @@ def test_graph_optims(self, device, dtype, optim_info):
                    "lr": 0.1,
                    "foreach": foreach,
                    "maximize": maximize,
-                   "weight_decay": weight_decay
-                   }
+                   "weight_decay": weight_decay,
+               }
                for foreach, maximize, weight_decay in product(
                    (False, True), (False, True), (0, 0.1)
                )
-               )
+           )
            ],
            torch.optim.RMSprop: [
@@ -4795,7 +4795,7 @@ def test_graph_scaling_fused_optimizers(self, device, dtype, optim_info):
                    "weight_decay": w,
                    "nesterov": n,
                    "fused": True,
-                   }
+               }
                for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
            ),
        ],
@@ -4830,14 +4830,14 @@ def test_graph_scaling_fused_optimizers(self, device, dtype, optim_info):
        with torch.no_grad():
            scaler_for_control._lazy_init_scale_growth_tracker(
                torch.device("cuda")
-               )
+           )

        scaler_for_graphed = torch.cuda.amp.GradScaler()
        scaler_for_graphed.load_state_dict(scaler_for_control.state_dict())
        with torch.no_grad():
            scaler_for_graphed._lazy_init_scale_growth_tracker(
                torch.device("cuda")
-               )
+           )

        # Control (capturable=False)
        if has_capturable_arg:
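
The diff above is purely mechanical: it adds a trailing comma after the last entry of each optimizer-kwargs dict and re-aligns the closing brackets, the kind of change a formatter such as black/ufmt enforces. As a minimal illustrative sketch of the fixed shape (simplified from the hunk at line 4723; the surrounding structure and variable name kwargs_list are assumptions, not the actual test_cuda.py code):

from itertools import product

# Simplified stand-in for the optimizer-kwargs construction in the test.
# The key names and value grid mirror the hunk above; everything else is
# illustrative only.
kwargs_list = [
    {
        "lr": 0.1,
        "foreach": foreach,
        "maximize": maximize,
        "weight_decay": weight_decay,  # trailing comma added by the lint fix
    }  # closing brace dedented to align with the comprehension body
    for foreach, maximize, weight_decay in product(
        (False, True), (False, True), (0, 0.1)
    )
]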