
Commit d0290d7

Annotate some functions that return None
Summary: Test functions return None but are often left unannotated. This codemod adds the missing -> None return annotations so that type-annotation efforts can focus on trickier cases.

Reviewed By: azad-meta

Differential Revision: D52570295

fbshipit-source-id: 9a9ae509b79a9716a21c1f3ca71e342d4b0c28e7
r-barnes authored and facebook-github-bot committed Jan 5, 2024
1 parent 6887581 commit d0290d7
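
The codemod itself is not shown in this diff. As a rough illustration of the kind of transform that produces these changes, here is a minimal sketch using libcst; the transformer name and the TARGET_PREFIXES heuristic are assumptions made for illustration, not the actual tooling behind this commit.

import libcst as cst


class AnnotateNoneReturns(cst.CSTTransformer):
    # Heuristic (an assumption for this sketch): methods with these name
    # prefixes conventionally return None in unittest.TestCase subclasses.
    TARGET_PREFIXES = ("test_", "setUp", "tearDown")

    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.FunctionDef:
        # Only annotate functions that do not already have a return annotation.
        if updated_node.returns is None and updated_node.name.value.startswith(
            self.TARGET_PREFIXES
        ):
            return updated_node.with_changes(
                returns=cst.Annotation(annotation=cst.Name("None"))
            )
        return updated_node


source = "def test_basic(self):\n    pass\n"
print(cst.parse_module(source).visit(AnnotateNoneReturns()).code)
# def test_basic(self) -> None:
#     pass

Applied file by file (parse each module, run the transformer, write back the resulting .code), a transform like this yields exactly the mechanical -> None additions seen in the diff below.
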
Showing 13 changed files with 89 additions and 89 deletions.
2 changes: 1 addition & 1 deletion opacus/tests/grad_samples/conv2d_test.py
@@ -152,7 +152,7 @@ def test_unfold2d(

assert_close(X_unfold_torch, X_unfold_opacus, atol=0, rtol=0)

-def test_asymetric_dilation_and_kernel_size(self):
+def test_asymetric_dilation_and_kernel_size(self) -> None:
"""
This test is mainly for particular use cases and can be useful for future debugging
"""
18 changes: 9 additions & 9 deletions opacus/tests/gradient_accumulation_test.py
@@ -61,7 +61,7 @@ def name(self):


class GradientAccumulationTest(unittest.TestCase):
-def setUp(self):
+def setUp(self) -> None:
self.DATA_SIZE = 128
self.BATCH_SIZE = 16
self.SAMPLE_RATE = self.BATCH_SIZE / self.DATA_SIZE
@@ -73,7 +73,7 @@ def setUp(self):
self.setUp_data()
self.setUp_model_and_optimizer()

-def setUp_data(self):
+def setUp_data(self) -> None:
self.ds = FakeData(
size=self.DATA_SIZE,
image_size=(1, 35, 35),
@@ -84,7 +84,7 @@ def setUp_data(self):
)
self.dl = DataLoader(self.ds, batch_size=self.BATCH_SIZE)

-def setUp_model_and_optimizer(self):
+def setUp_model_and_optimizer(self) -> None:
self.model = SampleConvNet()
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=self.LR, momentum=0
@@ -126,7 +126,7 @@ def model_forward_backward(
if num_steps == 0:
break

-def test_grad_sample_accumulation(self):
+def test_grad_sample_accumulation(self) -> None:
"""
Calling loss.backward() multiple times should sum up the gradients in .grad
and accumulate all the individual gradients in .grad-sample
@@ -175,7 +175,7 @@ def test_grad_sample_accumulation(self):
)
self.assertTrue(torch.allclose(grad, orig_grad, atol=10e-5, rtol=10e-3))

-def test_privacy_engine_poisson_accumulation(self):
+def test_privacy_engine_poisson_accumulation(self) -> None:
privacy_engine = PrivacyEngine()
model, optimizer, dl = privacy_engine.make_private(
module=self.model,
@@ -191,7 +191,7 @@ def test_privacy_engine_poisson_accumulation(self):
with self.assertRaises(ValueError):
self.model_forward_backward(model, dl, num_steps=1)

-def test_privacy_engine_no_poisson_accumulation(self):
+def test_privacy_engine_no_poisson_accumulation(self) -> None:
privacy_engine = PrivacyEngine()
model, optimizer, dl = privacy_engine.make_private(
module=self.model,
@@ -225,7 +225,7 @@ def test_privacy_engine_no_poisson_accumulation(self):
f"MAD is {(orig_grad - accumulated_grad).abs().mean()}",
)

-def test_privacy_engine_zero_grad(self):
+def test_privacy_engine_zero_grad(self) -> None:
privacy_engine = PrivacyEngine()
model, optimizer, dl = privacy_engine.make_private(
module=self.model,
@@ -248,7 +248,7 @@ def test_privacy_engine_zero_grad(self):
model, dl, optimizer, num_steps=2, do_zero_grad=False
)

-def test_batch_splitter_zero_grad(self):
+def test_batch_splitter_zero_grad(self) -> None:
privacy_engine = PrivacyEngine()
model, optimizer, dl = privacy_engine.make_private(
module=self.model,
@@ -274,6 +274,6 @@ def test_batch_splitter_zero_grad(self):


class GradientAccumulationTestFunctorch(GradientAccumulationTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "functorch"
2 changes: 1 addition & 1 deletion opacus/tests/module_validator_test.py
@@ -136,7 +136,7 @@ def forward(self, x):
model.b1.bias.requires_grad = False
self.assertTrue(ModuleValidator.is_valid(model))

-def test_fix_bn_with_args(self):
+def test_fix_bn_with_args(self) -> None:
m = nn.Sequential(
OrderedDict(
[
2 changes: 1 addition & 1 deletion opacus/tests/multigpu_gradcheck.py
@@ -140,7 +140,7 @@ def run_demo(demo_fn, weight, world_size, dp, clipping, grad_sample_mode):


class GradientComputationTest(unittest.TestCase):
-def test_gradient_correct(self):
+def test_gradient_correct(self) -> None:
# Tests that gradient is the same with DP or with DDP
n_gpus = torch.cuda.device_count()
self.assertTrue(
8 changes: 4 additions & 4 deletions opacus/tests/poisson_test.py
@@ -42,19 +42,19 @@ def setUp(self) -> None:

self.sampler, self.dataloader = self._init_data(seed=7)

-def test_length(self):
+def test_length(self) -> None:
self.assertEqual(len(self.sampler), 10)
self.assertEqual(len(self.dataloader), 10)

-def test_batch_sizes(self):
+def test_batch_sizes(self) -> None:
batch_sizes = []
for x, _y in self.dataloader:
batch_sizes.append(x.shape[0])

self.assertGreater(len(set(batch_sizes)), 1)
self.assertAlmostEqual(np.mean(batch_sizes), self.batch_size, delta=2)

-def test_same_seed(self):
+def test_same_seed(self) -> None:
batch_sizes1 = []
for x, _y in self.dataloader:
batch_sizes1.append(x.shape[0])
@@ -66,7 +66,7 @@ def test_same_seed(self):

self.assertEqual(batch_sizes1, batch_sizes2)

-def test_different_seed(self):
+def test_different_seed(self) -> None:
batch_sizes1 = []
for x, _y in self.dataloader:
batch_sizes1.append(x.shape[0])
48 changes: 24 additions & 24 deletions opacus/tests/privacy_engine_test.py
@@ -73,11 +73,11 @@ def setUp(self):
torch.manual_seed(42)

@abc.abstractmethod
-def _init_data(self):
+def _init_data(self) -> None:
pass

@abc.abstractmethod
-def _init_model(self):
+def _init_model(self) -> None:
pass

def _init_vanilla_training(
@@ -193,7 +193,7 @@ def closure():
if max_steps and steps >= max_steps:
break

-def test_basic(self):
+def test_basic(self) -> None:
for opt_exclude_frozen in [True, False]:
with self.subTest(opt_exclude_frozen=opt_exclude_frozen):
model, optimizer, dl, _ = self._init_private_training(
@@ -287,7 +287,7 @@ def test_compare_to_vanilla(
max_steps=max_steps,
)

-def test_flat_clipping(self):
+def test_flat_clipping(self) -> None:
self.BATCH_SIZE = 1
max_grad_norm = 0.5

@@ -314,7 +314,7 @@ def test_flat_clipping(self):
self.assertAlmostEqual(clipped_grads.norm().item(), max_grad_norm, places=3)
self.assertGreater(non_clipped_grads.norm(), clipped_grads.norm())

-def test_per_layer_clipping(self):
+def test_per_layer_clipping(self) -> None:
self.BATCH_SIZE = 1
max_grad_norm_per_layer = 1.0

@@ -344,7 +344,7 @@ def test_per_layer_clipping(self):
min(non_clipped_norm, max_grad_norm_per_layer), clipped_norm, places=3
)

-def test_sample_grad_aggregation(self):
+def test_sample_grad_aggregation(self) -> None:
"""
Check if final gradient is indeed an aggregation over per-sample gradients
"""
@@ -367,7 +367,7 @@ def test_sample_grad_aggregation(self):
f"Param: {p_name}",
)

-def test_noise_changes_every_time(self):
+def test_noise_changes_every_time(self) -> None:
"""
Test that adding noise results in ever different model params.
We disable clipping in this test by setting it to a very high threshold.
@@ -387,7 +387,7 @@ def test_noise_changes_every_time(self):
for p0, p1 in zip(first_run_params, second_run_params):
self.assertFalse(torch.allclose(p0, p1))

-def test_get_compatible_module_inaction(self):
+def test_get_compatible_module_inaction(self) -> None:
needs_no_replacement_module = nn.Linear(1, 2)
fixed_module = PrivacyEngine.get_compatible_module(needs_no_replacement_module)
self.assertFalse(fixed_module is needs_no_replacement_module)
@@ -397,7 +397,7 @@ def test_get_compatible_module_inaction(self):
)
)

-def test_model_validator(self):
+def test_model_validator(self) -> None:
"""
Test that the privacy engine raises errors
if there are unsupported modules
@@ -416,7 +416,7 @@ def test_model_validator(self):
grad_sample_mode=self.GRAD_SAMPLE_MODE,
)

-def test_model_validator_after_fix(self):
+def test_model_validator_after_fix(self) -> None:
"""
Test that the privacy engine fixes unsupported modules
and succeeds.
@@ -435,7 +435,7 @@ def test_model_validator_after_fix(self):
)
self.assertTrue(1, 1)

-def test_make_private_with_epsilon(self):
+def test_make_private_with_epsilon(self) -> None:
model, optimizer, dl = self._init_vanilla_training()
target_eps = 2.0
target_delta = 1e-5
@@ -458,7 +458,7 @@ def test_make_private_with_epsilon(self):
target_eps, privacy_engine.get_epsilon(target_delta), places=2
)

-def test_deterministic_run(self):
+def test_deterministic_run(self) -> None:
"""
Tests that for 2 different models, secure seed can be fixed
to produce same (deterministic) runs.
@@ -483,7 +483,7 @@ def test_deterministic_run(self):
"Model parameters after deterministic run must match",
)

-def test_validator_weight_update_check(self):
+def test_validator_weight_update_check(self) -> None:
"""
Test that the privacy engine raises error if ModuleValidator.fix(model) is
called after the optimizer is created
@@ -522,7 +522,7 @@ def test_validator_weight_update_check(self):
grad_sample_mode=self.GRAD_SAMPLE_MODE,
)

-def test_parameters_match(self):
+def test_parameters_match(self) -> None:
dl = self._init_data()

m1 = self._init_model()
@@ -721,7 +721,7 @@ def helper_test_noise_level(

@unittest.skip("requires torchcsprng compatible with new pytorch versions")
@patch("torch.normal", MagicMock(return_value=torch.Tensor([0.6])))
-def test_generate_noise_in_secure_mode(self):
+def test_generate_noise_in_secure_mode(self) -> None:
"""
Tests that the noise is added correctly in secure_mode,
according to section 5.1 in https://arxiv.org/abs/2107.10138.
@@ -803,16 +803,16 @@ def _init_model(self):


class PrivacyEngineConvNetEmptyBatchTest(PrivacyEngineConvNetTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()

# This will trigger multiple empty batches with poisson sampling enabled
self.BATCH_SIZE = 1

-def test_checkpoints(self):
+def test_checkpoints(self) -> None:
pass

-def test_noise_level(self):
+def test_noise_level(self) -> None:
pass


@@ -837,23 +837,23 @@ def _init_model(self):


class PrivacyEngineConvNetFrozenTestFunctorch(PrivacyEngineConvNetFrozenTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "functorch"


class PrivacyEngineConvNetTestExpandedWeights(PrivacyEngineConvNetTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "ew"

@unittest.skip("Original p.grad is not available in ExpandedWeights")
-def test_sample_grad_aggregation(self):
+def test_sample_grad_aggregation(self) -> None:
pass


class PrivacyEngineConvNetTestFunctorch(PrivacyEngineConvNetTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "functorch"

@@ -938,7 +938,7 @@ def _init_model(


class PrivacyEngineTextTestFunctorch(PrivacyEngineTextTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "functorch"

@@ -987,7 +987,7 @@ def _init_model(self):


class PrivacyEngineTiedWeightsTestFunctorch(PrivacyEngineTiedWeightsTest):
-def setUp(self):
+def setUp(self) -> None:
super().setUp()
self.GRAD_SAMPLE_MODE = "functorch"

