Annotate some functions that return None
Summary: Test functions return None. This codemod adds explicit -> None return annotations to them so that type-annotation efforts can focus on trickier cases.

Reviewed By: azad-meta

Differential Revision: D52570248

fbshipit-source-id: b20e5ec6cde1132d4e1f954af1e012d8464343c8
r-barnes authored and facebook-github-bot committed Jan 5, 2024
1 parent da05f77 commit 6887581
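
The commit contains only the generated annotations; the codemod itself is not part of this diff. As a rough, hypothetical sketch of the kind of transform involved (assuming a LibCST-based rewrite; the class name and the test-prefix heuristic below are invented for illustration, and the real codemod presumably also verifies that a function never returns a value):

# Hypothetical sketch, not the actual codemod behind this commit: a minimal
# LibCST transform that adds an explicit "-> None" return annotation to
# otherwise un-annotated test_* methods. Requires the libcst package.
import libcst as cst


class AddNoneReturnAnnotation(cst.CSTTransformer):
    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.FunctionDef:
        # Heuristic only: annotate test methods that have no return annotation.
        # A real codemod would also confirm the body never returns a value.
        if updated_node.returns is None and updated_node.name.value.startswith("test"):
            return updated_node.with_changes(
                returns=cst.Annotation(annotation=cst.Name("None"))
            )
        return updated_node


source = "class T(unittest.TestCase):\n    def test_foo(self):\n        pass\n"
rewritten = cst.parse_module(source).visit(AddNoneReturnAnnotation())
print(rewritten.code)  # test_foo now carries an explicit -> None annotation

Running a transform like this across the test tree and committing the output is what produces the mechanical per-file changes listed below.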
Showing 7 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion examples/mnist_lightning.py
@@ -136,7 +136,7 @@ def test_step(self, batch, batch_idx):
self.log("test_accuracy", self.test_accuracy, on_step=False, on_epoch=True)
return loss

-    def on_train_epoch_end(self):
+    def on_train_epoch_end(self) -> None:
# Logging privacy spent: (epsilon, delta)
epsilon = self.privacy_engine.get_epsilon(self.delta)
self.log("epsilon", epsilon, on_epoch=True, prog_bar=True)
20 changes: 10 additions & 10 deletions opacus/tests/accountants_test.py
@@ -27,7 +27,7 @@


class AccountingTest(unittest.TestCase):
-    def test_rdp_accountant(self):
+    def test_rdp_accountant(self) -> None:
noise_multiplier = 1.5
sample_rate = 0.04
steps = int(90 / 0.04)
@@ -39,7 +39,7 @@ def test_rdp_accountant(self):
epsilon = accountant.get_epsilon(delta=1e-5)
self.assertAlmostEqual(epsilon, 7.32911117143)

-    def test_gdp_accountant(self):
+    def test_gdp_accountant(self) -> None:
noise_multiplier = 1.5
sample_rate = 0.04
steps = int(90 // 0.04)
@@ -52,7 +52,7 @@ def test_gdp_accountant(self):
self.assertLess(6.59, epsilon)
self.assertLess(epsilon, 6.6)

-    def test_prv_accountant(self):
+    def test_prv_accountant(self) -> None:
noise_multiplier = 1.5
sample_rate = 0.04
steps = int(90 // 0.04)
@@ -65,7 +65,7 @@ def test_prv_accountant(self):
epsilon = accountant.get_epsilon(delta=1e-5)
self.assertAlmostEqual(epsilon, 6.777395712150674)

-    def test_get_noise_multiplier_rdp_epochs(self):
+    def test_get_noise_multiplier_rdp_epochs(self) -> None:
delta = 1e-5
sample_rate = 0.04
epsilon = 8
@@ -81,7 +81,7 @@ def test_get_noise_multiplier_rdp_epochs(self):

self.assertAlmostEqual(noise_multiplier, 1.416, places=4)

-    def test_get_noise_multiplier_rdp_steps(self):
+    def test_get_noise_multiplier_rdp_steps(self) -> None:
delta = 1e-5
sample_rate = 0.04
epsilon = 8
@@ -96,7 +96,7 @@ def test_get_noise_multiplier_rdp_steps(self):

self.assertAlmostEqual(noise_multiplier, 1.3562, places=4)

-    def test_get_noise_multiplier_prv_epochs(self):
+    def test_get_noise_multiplier_prv_epochs(self) -> None:
delta = 1e-5
sample_rate = 0.04
epsilon = 8
@@ -112,7 +112,7 @@ def test_get_noise_multiplier_prv_epochs(self):

self.assertAlmostEqual(noise_multiplier, 1.34765625, places=4)

-    def test_get_noise_multiplier_prv_steps(self):
+    def test_get_noise_multiplier_prv_steps(self) -> None:
delta = 1e-5
sample_rate = 0.04
epsilon = 8
@@ -153,7 +153,7 @@ def test_get_noise_multiplier_overshoot(self, epsilon, epochs, sample_rate, delt
actual_epsilon = accountant.get_epsilon(delta=delta)
self.assertLess(actual_epsilon, epsilon)

-    def test_get_noise_multiplier_gdp(self):
+    def test_get_noise_multiplier_gdp(self) -> None:
delta = 1e-5
sample_rate = 0.04
epsilon = 8
@@ -169,7 +169,7 @@ def test_get_noise_multiplier_gdp(self):

self.assertAlmostEqual(noise_multiplier, 1.3232421875)

-    def test_accountant_state_dict(self):
+    def test_accountant_state_dict(self) -> None:
noise_multiplier = 1.5
sample_rate = 0.04
steps = int(90 / 0.04)
@@ -191,7 +191,7 @@ def test_accountant_state_dict(self):
accountant.state_dict(dummy_dest)["dummy_k"], dummy_dest["dummy_k"]
)

-    def test_accountant_load_state_dict(self):
+    def test_accountant_load_state_dict(self) -> None:
noise_multiplier = 1.5
sample_rate = 0.04
steps = int(90 / 0.04)
4 changes: 2 additions & 2 deletions opacus/tests/batch_memory_manager_test.py
@@ -171,7 +171,7 @@ def test_empty_batch(
)
weights_before = torch.clone(model._module.fc.weight)

-    def test_equivalent_to_one_batch(self):
+    def test_equivalent_to_one_batch(self) -> None:
torch.manual_seed(1337)
model, optimizer, data_loader = self._init_training()

@@ -229,7 +229,7 @@ def test_equivalent_to_one_batch(self):
class BatchMemoryManagerTestWithExpandedWeights(BatchMemoryManagerTest):
GSM_MODE = "ew"

-    def test_empty_batch(self):
+    def test_empty_batch(self) -> None:
pass


8 changes: 4 additions & 4 deletions opacus/tests/ddp_hook_check.py
@@ -255,15 +255,15 @@ def run_function(local_function, tensor, dp, noise_multiplier=0, max_grad_norm=1


class GradientComputationTest(unittest.TestCase):
-    def test_connection(self):
+    def test_connection(self) -> None:
tensor = torch.zeros(10, 10)
world_size = run_function(debug, tensor, dp=True)

self.assertTrue(
world_size >= 2, f"Need at least 2 gpus but was provided only {world_size}."
)

-    def test_gradient_noclip_zeronoise(self):
+    def test_gradient_noclip_zeronoise(self) -> None:
# Tests that gradient is the same with DP or with DDP
weight_dp, weight_nodp = torch.zeros(10, 10), torch.zeros(10, 10)

@@ -272,7 +272,7 @@ def test_gradient_noclip_zeronoise(self):

self.assertTrue(torch.norm(weight_dp - weight_nodp) < 1e-7)

-    def test_ddp_hook(self):
+    def test_ddp_hook(self) -> None:
# Tests that the DDP hook does the same thing as naive aggregation with per layer clipping
weight_ddp_naive, weight_ddp_hook = torch.zeros(10, 10), torch.zeros(10, 10)

@@ -297,7 +297,7 @@ def test_ddp_hook(self):
f"DDP naive: {weight_ddp_naive}\nDDP hook: {weight_ddp_hook}",
)

-    def test_add_remove_ddp_hooks(self):
+    def test_add_remove_ddp_hooks(self) -> None:
remaining_hooks = {
"attached": None,
"detached": None,
6 changes: 3 additions & 3 deletions opacus/tests/distributed_poisson_test.py
@@ -54,13 +54,13 @@ def setUp(self) -> None:

self.samplers, self.dataloaders = self._init_data(seed=7)

-    def test_length(self):
+    def test_length(self) -> None:
for sampler in self.samplers:
self.assertEqual(len(sampler), 10)
for dataloader in self.dataloaders:
self.assertEqual(len(dataloader), 10)

-    def test_batch_sizes(self):
+    def test_batch_sizes(self) -> None:
for dataloader in self.dataloaders:
batch_sizes = []
for x, _y in dataloader:
@@ -71,7 +71,7 @@ def test_batch_sizes(self):
np.mean(batch_sizes), self.batch_size // self.world_size, delta=2
)

-    def test_separate_batches(self):
+    def test_separate_batches(self) -> None:
indices = {
rank: [i.item() for batch in self.samplers[rank] for i in batch]
for rank in range(self.world_size)
8 changes: 4 additions & 4 deletions opacus/tests/dpdataloader_test.py
@@ -20,12 +20,12 @@


class DPDataLoaderTest(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
self.data_size = 10
self.dimension = 7
self.num_classes = 11

-    def test_collate_classes(self):
+    def test_collate_classes(self) -> None:
x = torch.randn(self.data_size, self.dimension)
y = torch.randint(low=0, high=self.num_classes, size=(self.data_size,))

@@ -36,7 +36,7 @@ def test_collate_classes(self):
self.assertEqual(x_b.size(0), 0)
self.assertEqual(y_b.size(0), 0)

-    def test_collate_tensor(self):
+    def test_collate_tensor(self) -> None:
x = torch.randn(self.data_size, self.dimension)

dataset = TensorDataset(x)
@@ -46,7 +46,7 @@ def test_collate_tensor(self):

self.assertEqual(s.size(0), 0)

-    def test_drop_last_true(self):
+    def test_drop_last_true(self) -> None:
x = torch.randn(self.data_size, self.dimension)

dataset = TensorDataset(x)
14 changes: 7 additions & 7 deletions opacus/tests/grad_sample_module_test.py
@@ -225,26 +225,26 @@ def forward(self, x: torch.Tensor):
register_grad_sampler(SimpleLinear)(compute_linear_grad_sample)
GradSampleModule(SimpleLinear(4, 2))

-    def test_custom_module_validation(self):
+    def test_custom_module_validation(self) -> None:
with self.assertRaises(NotImplementedError):
GradSampleModule(mobilenet_v3_small())

-    def test_submodule_access(self):
+    def test_submodule_access(self) -> None:
_ = self.grad_sample_module.fc1
_ = self.grad_sample_module.fc2

with self.assertRaises(AttributeError):
_ = self.grad_sample_module.fc3

-    def test_state_dict(self):
+    def test_state_dict(self) -> None:
gs_state_dict = self.grad_sample_module.state_dict()
og_state_dict = self.original_model.state_dict()
# check wrapped module state dict
for key in og_state_dict.keys():
self.assertTrue(f"_module.{key}" in gs_state_dict)
assert_close(og_state_dict[key], gs_state_dict[f"_module.{key}"])

-    def test_load_state_dict(self):
+    def test_load_state_dict(self) -> None:
gs_state_dict = self.grad_sample_module.state_dict()
new_gs = GradSampleModule(
SampleConvNet(), batch_first=False, loss_reduction="mean"
@@ -261,11 +261,11 @@ def test_load_state_dict(self):
class EWGradSampleModuleTest(GradSampleModuleTest):
CLS = GradSampleModuleExpandedWeights

-    def test_remove_hooks(self):
+    def test_remove_hooks(self) -> None:
pass

-    def test_enable_hooks(self):
+    def test_enable_hooks(self) -> None:
pass

-    def test_disable_hooks(self):
+    def test_disable_hooks(self) -> None:
pass
