
Commit d0290d7

r-barnes authored and facebook-github-bot committed
Annotate some functions that return None
Summary: Test functions return None. This codemod fixes that so type annotation efforts can focus on trickier cases.

Reviewed By: azad-meta

Differential Revision: D52570295

fbshipit-source-id: 9a9ae509b79a9716a21c1f3ca71e342d4b0c28e7
1 parent 6887581 commit d0290d7

13 files changed: 89 additions and 89 deletions
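For context, the change itself is purely mechanical: every un-annotated test or setup method listed below gains an explicit -> None return annotation, with no behavioral change. The internal codemod that produced this diff is not public; the snippet below is only a minimal sketch of how the same transformation could be written with LibCST, and the class name AnnotateNoneReturn and the name-prefix heuristic are illustrative assumptions rather than the actual tool.

# Illustrative sketch only (assumption): not the internal codemod that produced this diff.
# Requires LibCST: pip install libcst
import libcst as cst


class AnnotateNoneReturn(cst.CSTTransformer):
    """Add an explicit '-> None' to un-annotated methods whose names look like
    test scaffolding (here: names starting with 'test', 'setUp', or '_init_')."""

    PREFIXES = ("test", "setUp", "_init_")

    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.FunctionDef:
        # Only touch functions with no return annotation and a matching name.
        if updated_node.returns is None and updated_node.name.value.startswith(
            self.PREFIXES
        ):
            return updated_node.with_changes(
                returns=cst.Annotation(annotation=cst.Name("None"))
            )
        return updated_node


source = '''\
class ExampleTest(unittest.TestCase):
    def setUp(self):
        self.x = 1

    def test_something(self):
        self.assertEqual(self.x, 1)
'''

# Prints the same module with '-> None' added to both methods,
# mirroring the per-file changes shown below.
print(cst.parse_module(source).visit(AnnotateNoneReturn()).code)

With the easy None-returning functions annotated this way, a type checker run over these files can then focus on the genuinely trickier signatures, as the commit summary above notes.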

opacus/tests/grad_samples/conv2d_test.py

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ def test_unfold2d(
 
         assert_close(X_unfold_torch, X_unfold_opacus, atol=0, rtol=0)
 
-    def test_asymetric_dilation_and_kernel_size(self):
+    def test_asymetric_dilation_and_kernel_size(self) -> None:
         """
         This test is mainly for particular use cases and can be useful for future debugging
         """

opacus/tests/gradient_accumulation_test.py

Lines changed: 9 additions & 9 deletions
@@ -61,7 +61,7 @@ def name(self):
 
 
 class GradientAccumulationTest(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.DATA_SIZE = 128
         self.BATCH_SIZE = 16
         self.SAMPLE_RATE = self.BATCH_SIZE / self.DATA_SIZE
@@ -73,7 +73,7 @@ def setUp(self):
         self.setUp_data()
         self.setUp_model_and_optimizer()
 
-    def setUp_data(self):
+    def setUp_data(self) -> None:
         self.ds = FakeData(
             size=self.DATA_SIZE,
             image_size=(1, 35, 35),
@@ -84,7 +84,7 @@ def setUp_data(self):
         )
         self.dl = DataLoader(self.ds, batch_size=self.BATCH_SIZE)
 
-    def setUp_model_and_optimizer(self):
+    def setUp_model_and_optimizer(self) -> None:
         self.model = SampleConvNet()
         self.optimizer = torch.optim.SGD(
             self.model.parameters(), lr=self.LR, momentum=0
@@ -126,7 +126,7 @@ def model_forward_backward(
             if num_steps == 0:
                 break
 
-    def test_grad_sample_accumulation(self):
+    def test_grad_sample_accumulation(self) -> None:
         """
         Calling loss.backward() multiple times should sum up the gradients in .grad
         and accumulate all the individual gradients in .grad-sample
@@ -175,7 +175,7 @@ def test_grad_sample_accumulation(self):
         )
         self.assertTrue(torch.allclose(grad, orig_grad, atol=10e-5, rtol=10e-3))
 
-    def test_privacy_engine_poisson_accumulation(self):
+    def test_privacy_engine_poisson_accumulation(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -191,7 +191,7 @@ def test_privacy_engine_poisson_accumulation(self):
         with self.assertRaises(ValueError):
             self.model_forward_backward(model, dl, num_steps=1)
 
-    def test_privacy_engine_no_poisson_accumulation(self):
+    def test_privacy_engine_no_poisson_accumulation(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -225,7 +225,7 @@ def test_privacy_engine_no_poisson_accumulation(self):
             f"MAD is {(orig_grad - accumulated_grad).abs().mean()}",
         )
 
-    def test_privacy_engine_zero_grad(self):
+    def test_privacy_engine_zero_grad(self) -> None:
         privacy_engine = PrivacyEngine()
         model, optimizer, dl = privacy_engine.make_private(
             module=self.model,
@@ -248,7 +248,7 @@ def test_privacy_engine_zero_grad(self):
             model, dl, optimizer, num_steps=2, do_zero_grad=False
         )
 
-    def test_batch_splitter_zero_grad(self):
+    def test_batch_splitter_zero_grad(self) -> None:
        privacy_engine = PrivacyEngine()
        model, optimizer, dl = privacy_engine.make_private(
            module=self.model,
@@ -274,6 +274,6 @@ def test_batch_splitter_zero_grad(self):
 
 
 class GradientAccumulationTestFunctorch(GradientAccumulationTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"

opacus/tests/module_validator_test.py

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ def forward(self, x):
         model.b1.bias.requires_grad = False
         self.assertTrue(ModuleValidator.is_valid(model))
 
-    def test_fix_bn_with_args(self):
+    def test_fix_bn_with_args(self) -> None:
         m = nn.Sequential(
             OrderedDict(
                 [

opacus/tests/multigpu_gradcheck.py

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ def run_demo(demo_fn, weight, world_size, dp, clipping, grad_sample_mode):
 
 
 class GradientComputationTest(unittest.TestCase):
-    def test_gradient_correct(self):
+    def test_gradient_correct(self) -> None:
         # Tests that gradient is the same with DP or with DDP
         n_gpus = torch.cuda.device_count()
         self.assertTrue(

opacus/tests/poisson_test.py

Lines changed: 4 additions & 4 deletions
@@ -42,19 +42,19 @@ def setUp(self) -> None:
 
         self.sampler, self.dataloader = self._init_data(seed=7)
 
-    def test_length(self):
+    def test_length(self) -> None:
         self.assertEqual(len(self.sampler), 10)
         self.assertEqual(len(self.dataloader), 10)
 
-    def test_batch_sizes(self):
+    def test_batch_sizes(self) -> None:
         batch_sizes = []
         for x, _y in self.dataloader:
             batch_sizes.append(x.shape[0])
 
         self.assertGreater(len(set(batch_sizes)), 1)
         self.assertAlmostEqual(np.mean(batch_sizes), self.batch_size, delta=2)
 
-    def test_same_seed(self):
+    def test_same_seed(self) -> None:
         batch_sizes1 = []
         for x, _y in self.dataloader:
             batch_sizes1.append(x.shape[0])
@@ -66,7 +66,7 @@ def test_same_seed(self):
 
         self.assertEqual(batch_sizes1, batch_sizes2)
 
-    def test_different_seed(self):
+    def test_different_seed(self) -> None:
         batch_sizes1 = []
         for x, _y in self.dataloader:
             batch_sizes1.append(x.shape[0])

opacus/tests/privacy_engine_test.py

Lines changed: 24 additions & 24 deletions
@@ -73,11 +73,11 @@ def setUp(self):
         torch.manual_seed(42)
 
     @abc.abstractmethod
-    def _init_data(self):
+    def _init_data(self) -> None:
         pass
 
     @abc.abstractmethod
-    def _init_model(self):
+    def _init_model(self) -> None:
         pass
 
     def _init_vanilla_training(
@@ -193,7 +193,7 @@ def closure():
             if max_steps and steps >= max_steps:
                 break
 
-    def test_basic(self):
+    def test_basic(self) -> None:
         for opt_exclude_frozen in [True, False]:
             with self.subTest(opt_exclude_frozen=opt_exclude_frozen):
                 model, optimizer, dl, _ = self._init_private_training(
@@ -287,7 +287,7 @@ def test_compare_to_vanilla(
                     max_steps=max_steps,
                 )
 
-    def test_flat_clipping(self):
+    def test_flat_clipping(self) -> None:
         self.BATCH_SIZE = 1
         max_grad_norm = 0.5
 
@@ -314,7 +314,7 @@ def test_flat_clipping(self):
         self.assertAlmostEqual(clipped_grads.norm().item(), max_grad_norm, places=3)
         self.assertGreater(non_clipped_grads.norm(), clipped_grads.norm())
 
-    def test_per_layer_clipping(self):
+    def test_per_layer_clipping(self) -> None:
         self.BATCH_SIZE = 1
         max_grad_norm_per_layer = 1.0
 
@@ -344,7 +344,7 @@ def test_per_layer_clipping(self):
                 min(non_clipped_norm, max_grad_norm_per_layer), clipped_norm, places=3
             )
 
-    def test_sample_grad_aggregation(self):
+    def test_sample_grad_aggregation(self) -> None:
         """
         Check if final gradient is indeed an aggregation over per-sample gradients
         """
@@ -367,7 +367,7 @@ def test_sample_grad_aggregation(self):
                 f"Param: {p_name}",
             )
 
-    def test_noise_changes_every_time(self):
+    def test_noise_changes_every_time(self) -> None:
         """
         Test that adding noise results in ever different model params.
         We disable clipping in this test by setting it to a very high threshold.
@@ -387,7 +387,7 @@ def test_noise_changes_every_time(self):
         for p0, p1 in zip(first_run_params, second_run_params):
             self.assertFalse(torch.allclose(p0, p1))
 
-    def test_get_compatible_module_inaction(self):
+    def test_get_compatible_module_inaction(self) -> None:
         needs_no_replacement_module = nn.Linear(1, 2)
         fixed_module = PrivacyEngine.get_compatible_module(needs_no_replacement_module)
         self.assertFalse(fixed_module is needs_no_replacement_module)
@@ -397,7 +397,7 @@ def test_get_compatible_module_inaction(self):
             )
         )
 
-    def test_model_validator(self):
+    def test_model_validator(self) -> None:
         """
         Test that the privacy engine raises errors
         if there are unsupported modules
@@ -416,7 +416,7 @@ def test_model_validator(self):
                 grad_sample_mode=self.GRAD_SAMPLE_MODE,
             )
 
-    def test_model_validator_after_fix(self):
+    def test_model_validator_after_fix(self) -> None:
         """
         Test that the privacy engine fixes unsupported modules
         and succeeds.
@@ -435,7 +435,7 @@ def test_model_validator_after_fix(self):
         )
         self.assertTrue(1, 1)
 
-    def test_make_private_with_epsilon(self):
+    def test_make_private_with_epsilon(self) -> None:
         model, optimizer, dl = self._init_vanilla_training()
         target_eps = 2.0
         target_delta = 1e-5
@@ -458,7 +458,7 @@ def test_make_private_with_epsilon(self):
             target_eps, privacy_engine.get_epsilon(target_delta), places=2
         )
 
-    def test_deterministic_run(self):
+    def test_deterministic_run(self) -> None:
         """
         Tests that for 2 different models, secure seed can be fixed
         to produce same (deterministic) runs.
@@ -483,7 +483,7 @@ def test_deterministic_run(self):
             "Model parameters after deterministic run must match",
         )
 
-    def test_validator_weight_update_check(self):
+    def test_validator_weight_update_check(self) -> None:
         """
         Test that the privacy engine raises error if ModuleValidator.fix(model) is
         called after the optimizer is created
@@ -522,7 +522,7 @@ def test_validator_weight_update_check(self):
                 grad_sample_mode=self.GRAD_SAMPLE_MODE,
            )
 
-    def test_parameters_match(self):
+    def test_parameters_match(self) -> None:
         dl = self._init_data()
 
         m1 = self._init_model()
@@ -721,7 +721,7 @@ def helper_test_noise_level(
 
     @unittest.skip("requires torchcsprng compatible with new pytorch versions")
     @patch("torch.normal", MagicMock(return_value=torch.Tensor([0.6])))
-    def test_generate_noise_in_secure_mode(self):
+    def test_generate_noise_in_secure_mode(self) -> None:
         """
         Tests that the noise is added correctly in secure_mode,
         according to section 5.1 in https://arxiv.org/abs/2107.10138.
@@ -803,16 +803,16 @@ def _init_model(self):
 
 
 class PrivacyEngineConvNetEmptyBatchTest(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
 
         # This will trigger multiple empty batches with poisson sampling enabled
         self.BATCH_SIZE = 1
 
-    def test_checkpoints(self):
+    def test_checkpoints(self) -> None:
         pass
 
-    def test_noise_level(self):
+    def test_noise_level(self) -> None:
         pass
 
 
@@ -837,23 +837,23 @@ def _init_model(self):
 
 
 class PrivacyEngineConvNetFrozenTestFunctorch(PrivacyEngineConvNetFrozenTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
 
 class PrivacyEngineConvNetTestExpandedWeights(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "ew"
 
     @unittest.skip("Original p.grad is not available in ExpandedWeights")
-    def test_sample_grad_aggregation(self):
+    def test_sample_grad_aggregation(self) -> None:
         pass
 
 
 class PrivacyEngineConvNetTestFunctorch(PrivacyEngineConvNetTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
@@ -938,7 +938,7 @@ def _init_model(
 
 
 class PrivacyEngineTextTestFunctorch(PrivacyEngineTextTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"
 
@@ -987,7 +987,7 @@ def _init_model(self):
 
 
 class PrivacyEngineTiedWeightsTestFunctorch(PrivacyEngineTiedWeightsTest):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
         self.GRAD_SAMPLE_MODE = "functorch"

0 commit comments
