From d0290d71d7765b8b123d5598663f9c1d6fe4b42f Mon Sep 17 00:00:00 2001
From: Richard Barnes
Date: Fri, 5 Jan 2024 13:45:10 -0800
Subject: [PATCH] Annotate some functions that return None

Summary: Test functions return None but are rarely annotated as such. This
codemod adds the missing `-> None` annotations so that type annotation
efforts can focus on trickier cases.

Reviewed By: azad-meta

Differential Revision: D52570295

fbshipit-source-id: 9a9ae509b79a9716a21c1f3ca71e342d4b0c28e7
---
 opacus/tests/grad_samples/conv2d_test.py      |  2 +-
 opacus/tests/gradient_accumulation_test.py    | 18 +++---
 opacus/tests/module_validator_test.py         |  2 +-
 opacus/tests/multigpu_gradcheck.py            |  2 +-
 opacus/tests/poisson_test.py                  |  8 +--
 opacus/tests/privacy_engine_test.py           | 48 ++++++++---------
 .../tests/privacy_engine_validation_test.py   | 20 +++----
 opacus/tests/prv_accountant.py                |  2 +-
 opacus/tests/randomness_test.py               | 52 +++++++++----------
 opacus/tests/validators/batch_norm_test.py    |  6 +--
 opacus/tests/validators/instance_norm_test.py |  6 +--
 opacus/tests/validators/lstm_test.py          |  6 +--
 .../validators/multihead_attention_test.py    |  6 +--
 13 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/opacus/tests/grad_samples/conv2d_test.py b/opacus/tests/grad_samples/conv2d_test.py
index cd54423a..b0deae3f 100644
--- a/opacus/tests/grad_samples/conv2d_test.py
+++ b/opacus/tests/grad_samples/conv2d_test.py
@@ -152,7 +152,7 @@ def test_unfold2d(
 
         assert_close(X_unfold_torch, X_unfold_opacus, atol=0, rtol=0)
 
-    def test_asymetric_dilation_and_kernel_size(self):
+    def test_asymetric_dilation_and_kernel_size(self) -> None:
         """
         This test is mainly for particular use cases and can be useful for future debugging
         """
diff --git a/opacus/tests/gradient_accumulation_test.py b/opacus/tests/gradient_accumulation_test.py
index f603b16b..2ce0f30f 100644
--- a/opacus/tests/gradient_accumulation_test.py
+++ b/opacus/tests/gradient_accumulation_test.py
@@ -61,7 +61,7 @@ def name(self):
 
 
 class GradientAccumulationTest(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.DATA_SIZE = 128
         self.BATCH_SIZE = 16
         self.SAMPLE_RATE = self.BATCH_SIZE / self.DATA_SIZE
@@ -73,7 +73,7 @@ def setUp(self):
         self.setUp_data()
         self.setUp_model_and_optimizer()
 
-    def setUp_data(self):
+    def setUp_data(self) -> None:
         self.ds = FakeData(
             size=self.DATA_SIZE,
             image_size=(1, 35, 35),
@@ -84,7 +84,7 @@ def setUp_data(self):
         )
         self.dl = DataLoader(self.ds, batch_size=self.BATCH_SIZE)
 
-    def setUp_model_and_optimizer(self):
+    def setUp_model_and_optimizer(self) -> None:
         self.model = SampleConvNet()
         self.optimizer = torch.optim.SGD(
             self.model.parameters(), lr=self.LR, momentum=0
@@ -126,7 +126,7 @@ def model_forward_backward(
             if num_steps == 0:
                 break
 
-    def test_grad_sample_accumulation(self):
+    def test_grad_sample_accumulation(self) -> None:
         """
         Calling loss.backward() multiple times should sum up the gradients in .grad
         and accumulate all the individual gradients in .grad-sample
@@ -175,7 +175,7 @@ def test_grad_sample_accumulation(self):
         )
         self.assertTrue(torch.allclose(grad, orig_grad, atol=10e-5, rtol=10e-3))
 
-    def test_privacy_engine_poisson_accumulation(self):
+    def test_privacy_engine_poisson_accumulation(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -191,7 +191,7 @@ def test_privacy_engine_poisson_accumulation(self):
         with self.assertRaises(ValueError):
             self.model_forward_backward(model, dl, num_steps=1)
 
-    def test_privacy_engine_no_poisson_accumulation(self):
+    def test_privacy_engine_no_poisson_accumulation(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -225,7 +225,7 @@ def test_privacy_engine_no_poisson_accumulation(self):
             f"MAD is {(orig_grad - accumulated_grad).abs().mean()}",
         )
 
-    def test_privacy_engine_zero_grad(self):
+    def test_privacy_engine_zero_grad(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -248,7 +248,7 @@ def test_privacy_engine_zero_grad(self):
             model, dl, optimizer, num_steps=2, do_zero_grad=False
         )
 
-    def test_batch_splitter_zero_grad(self):
+    def test_batch_splitter_zero_grad(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -274,6 +274,6 @@ def test_batch_splitter_zero_grad(self):
 
 
 class GradientAccumulationTestFunctorch(GradientAccumulationTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
diff --git a/opacus/tests/module_validator_test.py b/opacus/tests/module_validator_test.py
index c726f3ce..4e0d9b4c 100644
--- a/opacus/tests/module_validator_test.py
+++ b/opacus/tests/module_validator_test.py
@@ -136,7 +136,7 @@ def forward(self, x):
         model.b1.bias.requires_grad = False
         self.assertTrue(ModuleValidator.is_valid(model))
 
-    def test_fix_bn_with_args(self):
+    def test_fix_bn_with_args(self) -> None:
         m = nn.Sequential(
             OrderedDict(
                 [
diff --git a/opacus/tests/multigpu_gradcheck.py b/opacus/tests/multigpu_gradcheck.py
index a67e3273..af4e7bfe 100644
--- a/opacus/tests/multigpu_gradcheck.py
+++ b/opacus/tests/multigpu_gradcheck.py
@@ -140,7 +140,7 @@ def run_demo(demo_fn, weight, world_size, dp, clipping, grad_sample_mode):
 
 
 class GradientComputationTest(unittest.TestCase):
-    def test_gradient_correct(self):
+    def test_gradient_correct(self) -> None:
         # Tests that gradient is the same with DP or with DDP
         n_gpus = torch.cuda.device_count()
         self.assertTrue(
diff --git a/opacus/tests/poisson_test.py b/opacus/tests/poisson_test.py
index 137370a9..d89eccf8 100644
--- a/opacus/tests/poisson_test.py
+++ b/opacus/tests/poisson_test.py
@@ -42,11 +42,11 @@ def setUp(self) -> None:
 
         self.sampler, self.dataloader = self._init_data(seed=7)
 
-    def test_length(self):
+    def test_length(self) -> None:
         self.assertEqual(len(self.sampler), 10)
         self.assertEqual(len(self.dataloader), 10)
 
-    def test_batch_sizes(self):
+    def test_batch_sizes(self) -> None:
         batch_sizes = []
         for x, _y in self.dataloader:
             batch_sizes.append(x.shape[0])
@@ -54,7 +54,7 @@ def test_batch_sizes(self):
         self.assertGreater(len(set(batch_sizes)), 1)
         self.assertAlmostEqual(np.mean(batch_sizes), self.batch_size, delta=2)
 
-    def test_same_seed(self):
+    def test_same_seed(self) -> None:
         batch_sizes1 = []
         for x, _y in self.dataloader:
             batch_sizes1.append(x.shape[0])
@@ -66,7 +66,7 @@ def test_same_seed(self):
 
         self.assertEqual(batch_sizes1, batch_sizes2)
 
-    def test_different_seed(self):
+    def test_different_seed(self) -> None:
         batch_sizes1 = []
         for x, _y in self.dataloader:
             batch_sizes1.append(x.shape[0])
diff --git a/opacus/tests/privacy_engine_test.py b/opacus/tests/privacy_engine_test.py
index 4e2b33ff..022acfa3 100644
--- a/opacus/tests/privacy_engine_test.py
+++ b/opacus/tests/privacy_engine_test.py
@@ -73,11 +73,11 @@ def setUp(self):
         torch.manual_seed(42)
 
     @abc.abstractmethod
-    def _init_data(self):
+    def _init_data(self) -> None:
         pass
 
     @abc.abstractmethod
-    def _init_model(self):
+    def _init_model(self) -> None:
         pass
 
     def _init_vanilla_training(
@@ -193,7 +193,7 @@ def closure():
             if max_steps and steps >= max_steps:
                 break
 
-    def test_basic(self):
+    def test_basic(self) -> None:
         for opt_exclude_frozen in [True, False]:
             with self.subTest(opt_exclude_frozen=opt_exclude_frozen):
                 model, optimizer, dl, _ = self._init_private_training(
@@ -287,7 +287,7 @@ def test_compare_to_vanilla(
                     max_steps=max_steps,
                 )
 
-    def test_flat_clipping(self):
+    def test_flat_clipping(self) -> None:
         self.BATCH_SIZE = 1
         max_grad_norm = 0.5
 
@@ -314,7 +314,7 @@ def test_flat_clipping(self):
         self.assertAlmostEqual(clipped_grads.norm().item(), max_grad_norm, places=3)
         self.assertGreater(non_clipped_grads.norm(), clipped_grads.norm())
 
-    def test_per_layer_clipping(self):
+    def test_per_layer_clipping(self) -> None:
         self.BATCH_SIZE = 1
         max_grad_norm_per_layer = 1.0
 
@@ -344,7 +344,7 @@ def test_per_layer_clipping(self):
             min(non_clipped_norm, max_grad_norm_per_layer), clipped_norm, places=3
         )
 
-    def test_sample_grad_aggregation(self):
+    def test_sample_grad_aggregation(self) -> None:
         """
         Check if final gradient is indeed an aggregation over per-sample gradients
         """
@@ -367,7 +367,7 @@ def test_sample_grad_aggregation(self):
             f"Param: {p_name}",
         )
 
-    def test_noise_changes_every_time(self):
+    def test_noise_changes_every_time(self) -> None:
         """
         Test that adding noise results in ever different model params.
         We disable clipping in this test by setting it to a very high threshold.
@@ -387,7 +387,7 @@ def test_noise_changes_every_time(self):
         for p0, p1 in zip(first_run_params, second_run_params):
             self.assertFalse(torch.allclose(p0, p1))
 
-    def test_get_compatible_module_inaction(self):
+    def test_get_compatible_module_inaction(self) -> None:
         needs_no_replacement_module = nn.Linear(1, 2)
         fixed_module = PrivacyEngine.get_compatible_module(needs_no_replacement_module)
         self.assertFalse(fixed_module is needs_no_replacement_module)
@@ -397,7 +397,7 @@ def test_get_compatible_module_inaction(self):
             )
         )
 
-    def test_model_validator(self):
+    def test_model_validator(self) -> None:
         """
         Test that the privacy engine raises errors
         if there are unsupported modules
@@ -416,7 +416,7 @@ def test_model_validator(self):
                 grad_sample_mode=self.GRAD_SAMPLE_MODE,
             )
 
-    def test_model_validator_after_fix(self):
+    def test_model_validator_after_fix(self) -> None:
         """
         Test that the privacy engine fixes unsupported modules
         and succeeds.
@@ -435,7 +435,7 @@ def test_model_validator_after_fix(self):
         )
         self.assertTrue(1, 1)
 
-    def test_make_private_with_epsilon(self):
+    def test_make_private_with_epsilon(self) -> None:
         model, optimizer, dl = self._init_vanilla_training()
         target_eps = 2.0
         target_delta = 1e-5
@@ -458,7 +458,7 @@ def test_make_private_with_epsilon(self):
             target_eps, privacy_engine.get_epsilon(target_delta), places=2
         )
 
-    def test_deterministic_run(self):
+    def test_deterministic_run(self) -> None:
         """
         Tests that for 2 different models, secure seed can be fixed
         to produce same (deterministic) runs.
@@ -483,7 +483,7 @@ def test_deterministic_run(self):
             "Model parameters after deterministic run must match",
         )
 
-    def test_validator_weight_update_check(self):
+    def test_validator_weight_update_check(self) -> None:
         """
         Test that the privacy engine raises error if ModuleValidator.fix(model) is called
         after the optimizer is created
@@ -522,7 +522,7 @@ def test_validator_weight_update_check(self):
             grad_sample_mode=self.GRAD_SAMPLE_MODE,
         )
 
-    def test_parameters_match(self):
+    def test_parameters_match(self) -> None:
         dl = self._init_data()
 
         m1 = self._init_model()
@@ -721,7 +721,7 @@ def helper_test_noise_level(
 
     @unittest.skip("requires torchcsprng compatible with new pytorch versions")
     @patch("torch.normal", MagicMock(return_value=torch.Tensor([0.6])))
-    def test_generate_noise_in_secure_mode(self):
+    def test_generate_noise_in_secure_mode(self) -> None:
         """
         Tests that the noise is added correctly in secure_mode,
         according to section 5.1 in https://arxiv.org/abs/2107.10138.
@@ -803,16 +803,16 @@ def _init_model(self):
 
 
 class PrivacyEngineConvNetEmptyBatchTest(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
 
         # This will trigger multiple empty batches with poisson sampling enabled
         self.BATCH_SIZE = 1
 
-    def test_checkpoints(self):
+    def test_checkpoints(self) -> None:
         pass
 
-    def test_noise_level(self):
+    def test_noise_level(self) -> None:
         pass
 
 
@@ -837,23 +837,23 @@ def _init_model(self):
 
 
 class PrivacyEngineConvNetFrozenTestFunctorch(PrivacyEngineConvNetFrozenTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
 
 class PrivacyEngineConvNetTestExpandedWeights(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "ew"
 
     @unittest.skip("Original p.grad is not available in ExpandedWeights")
-    def test_sample_grad_aggregation(self):
+    def test_sample_grad_aggregation(self) -> None:
         pass
 
 
 class PrivacyEngineConvNetTestFunctorch(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
@@ -938,7 +938,7 @@ def _init_model(
 
 
 class PrivacyEngineTextTestFunctorch(PrivacyEngineTextTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
@@ -987,7 +987,7 @@ def _init_model(self):
 
 
 class PrivacyEngineTiedWeightsTestFunctorch(PrivacyEngineTiedWeightsTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
diff --git a/opacus/tests/privacy_engine_validation_test.py b/opacus/tests/privacy_engine_validation_test.py
index e2724d0b..04138a43 100644
--- a/opacus/tests/privacy_engine_validation_test.py
+++ b/opacus/tests/privacy_engine_validation_test.py
@@ -31,7 +31,7 @@ def _init(self, module, size, batch_size=10):
 
         return module, optim, dl
 
-    def test_supported_hooks(self):
+    def test_supported_hooks(self) -> None:
         module, optim, dl = self._init(BasicSupportedModule(), size=(16, 5))
 
         module, optim, dl = self.privacy_engine.make_private(
@@ -46,7 +46,7 @@ def test_supported_hooks(self):
         for x in dl:
             module(x)
 
-    def test_supported_ew(self):
+    def test_supported_ew(self) -> None:
         module, optim, dl = self._init(BasicSupportedModule(), size=(16, 5))
 
         module, optim, dl = self.privacy_engine.make_private(
@@ -61,7 +61,7 @@ def test_supported_ew(self):
         for x in dl:
             module(x)
 
-    def test_custom_linear_hooks(self):
+    def test_custom_linear_hooks(self) -> None:
         module, optim, dl = self._init(CustomLinearModule(5, 8), size=(16, 5))
         try:
             gsm, _, _ = self.privacy_engine.make_private(
@@ -76,7 +76,7 @@ def test_custom_linear_hooks(self):
         except ImportError:
             print("Test not ran because functorch not imported")
 
-    def test_custom_linear_ew(self):
+    def test_custom_linear_ew(self) -> None:
         module, optim, dl = self._init(CustomLinearModule(5, 8), size=(16, 5))
 
         module, optim, dl = self.privacy_engine.make_private(
@@ -91,7 +91,7 @@ def test_custom_linear_ew(self):
         for x in dl:
             module(x)
 
-    def test_unsupported_hooks(self):
+    def test_unsupported_hooks(self) -> None:
         try:
             module, optim, dl = self._init(MatmulModule(5, 8), size=(16, 5))
 
@@ -107,7 +107,7 @@ def test_unsupported_hooks(self):
         except ImportError:
             print("Test not ran because functorch not imported")
 
-    def test_unsupported_ew(self):
+    def test_unsupported_ew(self) -> None:
         module, optim, dl = self._init(
             MatmulModule(input_features=5, output_features=10),
             size=(16, 5),
@@ -127,7 +127,7 @@ def test_unsupported_ew(self):
         for x in dl:
             module(x)
 
-    def test_extra_param_hooks_requires_grad(self):
+    def test_extra_param_hooks_requires_grad(self) -> None:
         module, optim, dl = self._init(LinearWithExtraParam(5, 8), size=(16, 5))
         try:
             gsm, _, _ = self.privacy_engine.make_private(
@@ -143,7 +143,7 @@ def test_extra_param_hooks_requires_grad(self):
         except ImportError:
             print("Test not ran because functorch not imported")
 
-    def test_extra_param_hooks_no_requires_grad(self):
+    def test_extra_param_hooks_no_requires_grad(self) -> None:
         module, optim, dl = self._init(LinearWithExtraParam(5, 8), size=(16, 5))
         module.extra_param.requires_grad = False
         module, optim, dl = self.privacy_engine.make_private(
@@ -158,7 +158,7 @@ def test_extra_param_hooks_no_requires_grad(self):
         for x in dl:
             module(x)
 
-    def test_extra_param_ew(self):
+    def test_extra_param_ew(self) -> None:
         module, optim, dl = self._init(LinearWithExtraParam(5, 8), size=(16, 5))
         module, optim, dl = self.privacy_engine.make_private(
             module=module,
@@ -172,7 +172,7 @@ def test_extra_param_ew(self):
         for x in dl:
             module(x)
 
-    def test_extra_param_disabled_ew(self):
+    def test_extra_param_disabled_ew(self) -> None:
         module, optim, dl = self._init(LinearWithExtraParam(5, 8), size=(16, 5))
         module.extra_param.requires_grad = False
 
diff --git a/opacus/tests/prv_accountant.py b/opacus/tests/prv_accountant.py
index b2efd290..ee00582c 100644
--- a/opacus/tests/prv_accountant.py
+++ b/opacus/tests/prv_accountant.py
@@ -64,7 +64,7 @@
 
 
 class PRVAccountantTest(unittest.TestCase):
-    def test_values(self):
+    def test_values(self) -> None:
         for (sigma, q, steps), expected_epsilon in msr_values.items():
             accountant = PRVAccountant()
             accountant.history = [(sigma, q, steps)]
diff --git a/opacus/tests/randomness_test.py b/opacus/tests/randomness_test.py
index 405e61de..feae8a72 100644
--- a/opacus/tests/randomness_test.py
+++ b/opacus/tests/randomness_test.py
@@ -60,19 +60,19 @@ def _read_all_dp(self, dp_generator, original_generator=None):
         dpdl = DPDataLoader.from_data_loader(dl, generator=dp_generator)
         return _read_all(dpdl)
 
-    def test_no_seed(self):
+    def test_no_seed(self) -> None:
         data1 = self._read_all_dp(dp_generator=None)
         data2 = self._read_all_dp(dp_generator=None)
         self.assertNotEqualTensors(data1, data2)
 
-    def test_global_seed(self):
+    def test_global_seed(self) -> None:
         torch.manual_seed(1337)
         data1 = self._read_all_dp(dp_generator=None)
         torch.manual_seed(1337)
         data2 = self._read_all_dp(dp_generator=None)
         self.assertEqualTensors(data1, data2)
 
-    def test_custom_generator(self):
+    def test_custom_generator(self) -> None:
         gen = torch.Generator()
         gen.manual_seed(1337)
         data1 = self._read_all_dp(dp_generator=gen)
@@ -80,7 +80,7 @@ def test_custom_generator(self):
         data2 = self._read_all_dp(dp_generator=gen)
         self.assertEqualTensors(data1, data2)
 
-    def test_custom_generator_with_global_seed(self):
+    def test_custom_generator_with_global_seed(self) -> None:
         gen = torch.Generator()
         torch.manual_seed(1337)
         data1 = self._read_all_dp(dp_generator=gen)
@@ -88,7 +88,7 @@ def test_custom_generator_with_global_seed(self):
         data2 = self._read_all_dp(dp_generator=gen)
         self.assertNotEqualTensors(data1, data2)
 
-    def test_original_generator(self):
+    def test_original_generator(self) -> None:
         gen = torch.Generator()
         gen.manual_seed(1337)
         data1 = self._read_all_dp(dp_generator=None, original_generator=gen)
@@ -96,7 +96,7 @@ def test_original_generator(self):
         data2 = self._read_all_dp(dp_generator=None, original_generator=gen)
         self.assertEqualTensors(data1, data2)
 
-    def test_custom_generator_overrides_original(self):
+    def test_custom_generator_overrides_original(self) -> None:
         dp_gen = torch.Generator()
         orig_gen = torch.Generator()
         orig_gen.manual_seed(1337)
@@ -131,7 +131,7 @@ def _read_all_switch(self, orig_generator=None, new_generator=None, shuffle=True
         dl = switch_generator(data_loader=dl, generator=new_generator)
         return _read_all(dl)
 
-    def test_consistent(self):
+    def test_consistent(self) -> None:
         orig_gen = torch.Generator()
         orig_gen.manual_seed(1337)
         data1 = self._read_all_simple(orig_generator=orig_gen)
@@ -139,7 +139,7 @@ def test_consistent(self):
         data2 = self._read_all_simple(orig_generator=orig_gen)
         self.assertEqualTensors(data1, data2)
 
-    def test_basic_switch(self):
+    def test_basic_switch(self) -> None:
         orig_gen = torch.Generator()
         orig_gen.manual_seed(1337)
         data1 = self._read_all_simple(orig_generator=orig_gen)
@@ -150,7 +150,7 @@ def test_basic_switch(self):
         data2 = self._read_all_switch(new_generator=other_gen, orig_generator=orig_gen)
         self.assertNotEqualTensors(data1, data2)
 
-    def test_switch_same_seed(self):
+    def test_switch_same_seed(self) -> None:
         orig_gen = torch.Generator()
         orig_gen.manual_seed(1337)
         data1 = self._read_all_simple(orig_generator=orig_gen)
@@ -161,7 +161,7 @@ def test_switch_same_seed(self):
         data2 = self._read_all_switch(new_generator=other_gen, orig_generator=orig_gen)
         self.assertEqualTensors(data1, data2)
 
-    def test_raise_sequential(self):
+    def test_raise_sequential(self) -> None:
         orig_gen = torch.Generator()
         other_gen = torch.Generator()
         with self.assertRaises(ValueError):
@@ -196,7 +196,7 @@ def _init_training(self, generator, noise: float = 1.0):
 
         return model, dp_optim, dl
 
-    def test_no_seed(self):
+    def test_no_seed(self) -> None:
         model1, optim1, dl1 = self._init_training(generator=None)
         _epoch(model1, optim1, dl1)
 
@@ -204,7 +204,7 @@ def test_no_seed(self):
         _epoch(model2, optim2, dl2)
 
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_no_noise(self):
+    def test_no_noise(self) -> None:
         model1, optim1, dl1 = self._init_training(generator=None, noise=0.0)
         _epoch(model1, optim1, dl1)
@@ -213,7 +213,7 @@ def test_no_noise(self):
 
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_global_seed(self):
+    def test_global_seed(self) -> None:
         model1, optim1, dl1 = self._init_training(generator=None)
         torch.manual_seed(1337)
         _epoch(model1, optim1, dl1)
@@ -223,7 +223,7 @@ def test_global_seed(self):
         _epoch(model2, optim2, dl2)
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_generator(self):
+    def test_generator(self) -> None:
         gen = torch.Generator()
         model1, optim1, dl1 = self._init_training(generator=gen)
         _epoch(model1, optim1, dl1)
@@ -232,7 +232,7 @@ def test_generator(self):
         _epoch(model2, optim2, dl2)
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_generator_with_global_seed(self):
+    def test_generator_with_global_seed(self) -> None:
         gen = torch.Generator()
         model1, optim1, dl1 = self._init_training(generator=gen)
         torch.manual_seed(1337)
@@ -243,7 +243,7 @@ def test_generator_with_global_seed(self):
         _epoch(model2, optim2, dl2)
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_generator_seed(self):
+    def test_generator_seed(self) -> None:
         gen = torch.Generator()
         model1, optim1, dl1 = self._init_training(generator=gen)
         gen.manual_seed(8888)
@@ -305,7 +305,7 @@ def _init_dp_training(
             poisson_sampling=poisson_sampling,
         )
 
-    def test_basic(self):
+    def test_basic(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(secure_mode=False)
         _epoch(model1, optim1, dl1)
 
@@ -315,11 +315,11 @@ def test_basic(self):
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
     @unittest.skip("requires torchcsprng compatible with new pytorch versions")
-    def test_raise_secure_mode(self):
+    def test_raise_secure_mode(self) -> None:
         with self.assertRaises(ValueError):
             self._init_dp_training(secure_mode=True, noise_seed=42)
 
-    def test_global_seed(self):
+    def test_global_seed(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(secure_mode=False)
         torch.manual_seed(1337)
         _epoch(model1, optim1, dl1)
@@ -331,7 +331,7 @@ def test_global_seed(self):
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
     @unittest.skip("requires torchcsprng compatible with new pytorch versions")
-    def test_secure_mode_global_seed(self):
+    def test_secure_mode_global_seed(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(secure_mode=True)
         torch.manual_seed(1337)
         _epoch(model1, optim1, dl1)
@@ -342,7 +342,7 @@ def test_secure_mode_global_seed(self):
 
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_dl_seed_with_noise(self):
+    def test_dl_seed_with_noise(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(secure_mode=False, dl_seed=96)
         _epoch(model1, optim1, dl1)
 
@@ -351,7 +351,7 @@ def test_dl_seed_with_noise(self):
 
         self.assertFalse(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_dl_seed_no_noise(self):
+    def test_dl_seed_no_noise(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(
             secure_mode=False, dl_seed=96, noise=0.0
         )
@@ -364,7 +364,7 @@ def test_dl_seed_no_noise(self):
 
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_seed(self):
+    def test_seed(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(
             secure_mode=False, dl_seed=96, noise_seed=17
         )
@@ -377,7 +377,7 @@ def test_seed(self):
 
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_custom_and_global_seed(self):
+    def test_custom_and_global_seed(self) -> None:
         model1, optim1, dl1 = self._init_dp_training(
             secure_mode=False, dl_seed=96, noise_seed=17
         )
@@ -392,7 +392,7 @@ def test_custom_and_global_seed(self):
 
         self.assertTrue(torch.allclose(model1._module.weight, model2._module.weight))
 
-    def test_data_seed_consistency(self):
+    def test_data_seed_consistency(self) -> None:
         _, _, dl1 = self._init_dp_training(
             secure_mode=False, dl_seed=1337, poisson_sampling=False
         )
@@ -406,7 +406,7 @@ def test_data_seed_consistency(self):
         self.assertTrue(torch.allclose(data1, data2))
 
     @unittest.skip("requires torchcsprng compatible with new pytorch versions")
-    def test_secure_mode_no_poisson(self):
+    def test_secure_mode_no_poisson(self) -> None:
         _, _, dl1 = self._init_dp_training(
             secure_mode=True, dl_seed=1337, poisson_sampling=False
         )
diff --git a/opacus/tests/validators/batch_norm_test.py b/opacus/tests/validators/batch_norm_test.py
index 5c2559c4..13350cf8 100644
--- a/opacus/tests/validators/batch_norm_test.py
+++ b/opacus/tests/validators/batch_norm_test.py
@@ -21,7 +21,7 @@
 
 
 class BatchNormValidator_test(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.bn1 = nn.BatchNorm1d(4)
         self.bn2 = nn.BatchNorm2d(4)
         self.bn3 = nn.BatchNorm3d(4)
@@ -29,7 +29,7 @@ def setUp(self):
         self.mv = ModuleValidator.VALIDATORS
         self.mf = ModuleValidator.FIXERS
 
-    def test_validate(self):
+    def test_validate(self) -> None:
         val1 = self.mv[type(self.bn1)](self.bn1)
         val2 = self.mv[type(self.bn2)](self.bn2)
         val3 = self.mv[type(self.bn3)](self.bn3)
@@ -45,7 +45,7 @@ def test_validate(self):
         self.assertTrue(isinstance(val3[0], ShouldReplaceModuleError))
         self.assertTrue(isinstance(vals[0], ShouldReplaceModuleError))
 
-    def test_fix(self):
+    def test_fix(self) -> None:
         fix1 = self.mf[type(self.bn1)](self.bn1)
         fix2 = self.mf[type(self.bn2)](self.bn2)
         fix3 = self.mf[type(self.bn3)](self.bn3)
diff --git a/opacus/tests/validators/instance_norm_test.py b/opacus/tests/validators/instance_norm_test.py
index 7608afd8..78cae1ec 100644
--- a/opacus/tests/validators/instance_norm_test.py
+++ b/opacus/tests/validators/instance_norm_test.py
@@ -21,7 +21,7 @@
 
 
 class InstanceNormValidator_test(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.in1 = nn.InstanceNorm1d(4, affine=True, track_running_stats=True)
         self.in2 = nn.InstanceNorm2d(4, affine=False, track_running_stats=True)
         self.in3 = nn.InstanceNorm3d(4, affine=False, track_running_stats=True)
@@ -30,7 +30,7 @@ def setUp(self):
         self.mv = ModuleValidator.VALIDATORS
         self.mf = ModuleValidator.FIXERS
 
-    def test_validate(self):
+    def test_validate(self) -> None:
         val1 = self.mv[type(self.in1)](self.in1)
         val2 = self.mv[type(self.in2)](self.in2)
         val3 = self.mv[type(self.in3)](self.in3)
@@ -45,7 +45,7 @@ def test_validate(self):
         self.assertTrue(isinstance(val2[0], IllegalModuleConfigurationError))
         self.assertTrue(isinstance(val3[0], IllegalModuleConfigurationError))
 
-    def test_fix(self):
+    def test_fix(self) -> None:
         fix1 = self.mf[type(self.in1)](self.in1)
         fix2 = self.mf[type(self.in2)](self.in2)
         fix3 = self.mf[type(self.in3)](self.in3)
diff --git a/opacus/tests/validators/lstm_test.py b/opacus/tests/validators/lstm_test.py
index b3952e91..abb91bce 100644
--- a/opacus/tests/validators/lstm_test.py
+++ b/opacus/tests/validators/lstm_test.py
@@ -23,17 +23,17 @@
 
 
 class LSTMValidator_test(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.lstm = nn.LSTM(8, 4)
         self.mv = ModuleValidator.VALIDATORS
         self.mf = ModuleValidator.FIXERS
 
-    def test_validate(self):
+    def test_validate(self) -> None:
         val_lstm = self.mv[type(self.lstm)](self.lstm)
         self.assertEqual(len(val_lstm), 1)
         self.assertTrue(isinstance(val_lstm[0], ShouldReplaceModuleError))
 
-    def test_fix(self):
+    def test_fix(self) -> None:
         fix_lstm = self.mf[type(self.lstm)](self.lstm)
         self.assertTrue(isinstance(fix_lstm, DPLSTM))
         self.assertTrue(
diff --git a/opacus/tests/validators/multihead_attention_test.py b/opacus/tests/validators/multihead_attention_test.py
index 1e815519..a5be00ae 100644
--- a/opacus/tests/validators/multihead_attention_test.py
+++ b/opacus/tests/validators/multihead_attention_test.py
@@ -23,17 +23,17 @@
 
 
 class MultiheadAttentionValidator_test(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.mha = nn.MultiheadAttention(8, 4)
         self.mv = ModuleValidator.VALIDATORS
         self.mf = ModuleValidator.FIXERS
 
-    def test_validate(self):
+    def test_validate(self) -> None:
         val_mha = self.mv[type(self.mha)](self.mha)
         self.assertEqual(len(val_mha), 1)
         self.assertTrue(isinstance(val_mha[0], ShouldReplaceModuleError))
 
-    def test_fix(self):
+    def test_fix(self) -> None:
         fix_mha = self.mf[type(self.mha)](self.mha)
         self.assertTrue(isinstance(fix_mha, DPMultiheadAttention))
         self.assertTrue(
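
The diff above is purely mechanical, which is what makes it a good codemod
target. The tool that produced D52570295 is not part of this patch; the
following is only an illustrative sketch of such a transform using libcst
(the AddNoneReturn class, its narrow test-method filter, and the sample
source are assumptions for demonstration, not the codemod that was actually
run):

import libcst as cst


class AddNoneReturn(cst.CSTTransformer):
    """Add `-> None` to unannotated test/fixture methods.

    Deliberately narrow: unittest test methods and fixtures such as
    setUp() return None by convention, so the annotation is safe to add
    without analyzing the function body.
    """

    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.FunctionDef:
        name = updated_node.name.value
        if updated_node.returns is None and (
            name.startswith("test") or name in ("setUp", "tearDown")
        ):
            # Attach `-> None`, leaving the body and all surrounding
            # formatting untouched (libcst preserves whitespace).
            return updated_node.with_changes(
                returns=cst.Annotation(annotation=cst.Name("None"))
            )
        return updated_node


source = (
    "class MyTest(unittest.TestCase):\n"
    "    def setUp(self):\n"
    "        self.x = 1\n"
)
print(cst.parse_module(source).visit(AddNoneReturn()).code)
# class MyTest(unittest.TestCase):
#     def setUp(self) -> None:
#         self.x = 1

For arbitrary functions this shortcut would be unsound: `-> None` is only
correct once every code path is known to return no value, which is why the
summary scopes the codemod to the easy cases and leaves the trickier ones
for manual annotation.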