diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9b9057d794ce1..a377ed2426356 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -27,8 +27,7 @@ repos:
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
-        # keep formatting in README flexible
-        exclude: README.md
+        exclude: README.md # keep formatting in README flexible
       - id: check-json
       - id: check-yaml
      - id: check-toml
diff --git a/pyproject.toml b/pyproject.toml
index e6d08411b0f35..747b6c4a3a623 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -93,9 +93,6 @@ ignore = [
     "S101", # todo: Use of `assert` detected
     "S105", "S106", "S107", # todo: Possible hardcoded password: ...
     "S113", # todo: Probable use of requests call without timeout
-    "S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
-    "S324", # todo: Probable use of insecure hash functions in `hashlib`
-    "S403", # todo: `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
     "S404", # todo: `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
@@ -106,20 +103,15 @@ ignore = [
     ...
 ]
 "tests/**" = [
     "S101", # Use of `assert` detected
-    "S105", "S106", # todo: Possible hardcoded password: ...
     "S301", # `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
-    "S113", # todo: Probable use of requests call without timeout
     "S311", # todo: Standard pseudo-random generators are not suitable for cryptographic purposes
     "S108", # todo: Probable insecure usage of temporary file or directory: "/tmp/sys-customizations-sync"
-    "S202", # Uses of `tarfile.extractall()`
     "S403", # `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
     "S404", # `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
     "S607", # todo: Starting a process with a partial executable path
-    "RET504", # todo:Unnecessary variable assignment before `return` statement
-    "PT004", # todo: Fixture `tmpdir_unittest_fixture` does not return anything, add leading underscore
     "PT012", # todo: `pytest.raises()` block should contain a single simple statement
     "PT019", # todo: Fixture `_` without value is injected as parameter, use `@pytest.mark.usefixtures` instead
 ]
diff --git a/tests/parity_fabric/models.py b/tests/parity_fabric/models.py
index 4887a4c7f7dba..f65a20460e2f7 100644
--- a/tests/parity_fabric/models.py
+++ b/tests/parity_fabric/models.py
@@ -60,8 +60,7 @@ def forward(self, x):
         x = torch.flatten(x, 1) # flatten all dimensions except batch
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
+        return self.fc3(x)
 
     def get_optimizer(self):
         return torch.optim.SGD(self.parameters(), lr=0.0001)
diff --git a/tests/tests_fabric/strategies/test_model_parallel_integration.py b/tests/tests_fabric/strategies/test_model_parallel_integration.py
index 4c11fb0edcd78..18c5ad07252da 100644
--- a/tests/tests_fabric/strategies/test_model_parallel_integration.py
+++ b/tests/tests_fabric/strategies/test_model_parallel_integration.py
@@ -83,8 +83,7 @@ def _parallelize_feed_forward_fsdp2(model, device_mesh):
 
 def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
     model = _parallelize_feed_forward_tp(model, device_mesh)
-    model = _parallelize_feed_forward_fsdp2(model, device_mesh)
-    return model
+    return _parallelize_feed_forward_fsdp2(model, device_mesh)
 
 
 @RunIf(min_torch="2.4", standalone=True, min_cuda_gpus=4)
diff --git a/tests/tests_pytorch/accelerators/test_xla.py b/tests/tests_pytorch/accelerators/test_xla.py
index 83dace719371d..5e56d5c585c88 100644
--- a/tests/tests_pytorch/accelerators/test_xla.py
+++ b/tests/tests_pytorch/accelerators/test_xla.py
@@ -46,8 +46,7 @@ def __init__(self):
     def forward(self, x):
         x = self.layer_1(x)
         x = self.layer_2(x)
-        x = self.layer_3(x)
-        return x
+        return self.layer_3(x)
 
 
 @RunIf(tpu=True, standalone=True)
@@ -230,8 +229,7 @@ def __init__(self):
     def forward(self, x):
         x = self.net_a(x)
         x = self.layer_2(x)
-        x = self.net_b(x)
-        return x
+        return self.net_b(x)
 
 
 @RunIf(tpu=True)
diff --git a/tests/tests_pytorch/callbacks/test_lr_monitor.py b/tests/tests_pytorch/callbacks/test_lr_monitor.py
index 66ce47f0e7ad4..391841e4e949c 100644
--- a/tests/tests_pytorch/callbacks/test_lr_monitor.py
+++ b/tests/tests_pytorch/callbacks/test_lr_monitor.py
@@ -428,8 +428,7 @@ def __init__(self):
 
     def forward(self, x):
         x = self.linear_a(x)
-        x = self.linear_b(x)
-        return x
+        return self.linear_b(x)
 
     def configure_optimizers(self):
         param_groups = [
@@ -603,8 +602,7 @@ def __init__(self, lr, momentum):
 
     def forward(self, x):
         x = self.linear_a(x)
-        x = self.linear_b(x)
-        return x
+        return self.linear_b(x)
 
     def configure_optimizers(self):
         param_groups = [
diff --git a/tests/tests_pytorch/callbacks/test_spike.py b/tests/tests_pytorch/callbacks/test_spike.py
index 86e3ac88e93cf..20679c52394c0 100644
--- a/tests/tests_pytorch/callbacks/test_spike.py
+++ b/tests/tests_pytorch/callbacks/test_spike.py
@@ -29,8 +29,7 @@ def training_step(self, batch, batch_idx: int):
         if curr_loss_val is None:
             curr_loss_val = batch_idx
 
-        loss = self.layer(torch.tensor(curr_loss_val, device=self.device, dtype=self.dtype).view(1, 1))
-        return loss
+        return self.layer(torch.tensor(curr_loss_val, device=self.device, dtype=self.dtype).view(1, 1))
 
     def configure_optimizers(self):
         return torch.optim.SGD(self.parameters(), lr=1e-3)
diff --git a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
index abcd302149fcf..df2df72b18c1f 100644
--- a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
+++ b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
@@ -266,8 +266,7 @@ def __init__(self):
 
     def forward(self, x):
         x = self.layer1(x)
-        x = self.layer2(x)
-        return x
+        return self.layer2(x)
 
     def configure_optimizers(self):
         params = [{"params": self.layer1.parameters(), "lr": 0.1}, {"params": self.layer2.parameters(), "lr": 0.2}]
diff --git a/tests/tests_pytorch/helpers/advanced_models.py b/tests/tests_pytorch/helpers/advanced_models.py
index 959e6e5968d18..3426c3e51f41d 100644
--- a/tests/tests_pytorch/helpers/advanced_models.py
+++ b/tests/tests_pytorch/helpers/advanced_models.py
@@ -46,8 +46,7 @@ def block(in_feat, out_feat, normalize=True):
 
     def forward(self, z):
         img = self.model(z)
-        img = img.view(img.size(0), *self.img_shape)
-        return img
+        return img.view(img.size(0), *self.img_shape)
 
 
 class Discriminator(nn.Module):
@@ -204,8 +203,7 @@ def forward(self, x):
         x = torch.tanh(x)
         x = self.c_d1_bn(x)
         x = self.c_d1_drop(x)
-        x = self.c_d2(x)
-        return x
+        return self.c_d2(x)
 
     def training_step(self, batch, batch_nb):
         x, y = batch
diff --git a/tests/tests_pytorch/helpers/simple_models.py b/tests/tests_pytorch/helpers/simple_models.py
index a9dc635bba275..49f931ed14cba 100644
--- a/tests/tests_pytorch/helpers/simple_models.py
+++ b/tests/tests_pytorch/helpers/simple_models.py
@@ -100,8 +100,7 @@ def forward(self, x):
         x = self.layer_1a(x)
         x = self.layer_2(x)
         x = self.layer_2a(x)
-        x = self.layer_end(x)
-        return x
+        return self.layer_end(x)
 
     def configure_optimizers(self):
         optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
diff --git a/tests/tests_pytorch/models/test_hparams.py b/tests/tests_pytorch/models/test_hparams.py
index 92a07f0a3d05e..d354c8c1e16ef 100644
--- a/tests/tests_pytorch/models/test_hparams.py
+++ b/tests/tests_pytorch/models/test_hparams.py
@@ -417,8 +417,7 @@ def _raw_checkpoint_path(trainer) -> str:
     raw_checkpoint_paths = [x for x in raw_checkpoint_paths if ".ckpt" in x]
     assert raw_checkpoint_paths
     raw_checkpoint_path = raw_checkpoint_paths[0]
-    raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
-    return raw_checkpoint_path
+    return os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
 
 
 @pytest.mark.parametrize("base_class", [HyperparametersMixin, LightningModule, LightningDataModule])
diff --git a/tests/tests_pytorch/plugins/test_amp_plugins.py b/tests/tests_pytorch/plugins/test_amp_plugins.py
index 0b68c098cc713..b345e9e1bee8f 100644
--- a/tests/tests_pytorch/plugins/test_amp_plugins.py
+++ b/tests/tests_pytorch/plugins/test_amp_plugins.py
@@ -165,8 +165,7 @@ def __init__(self):
 
     def forward(self, x: Tensor):
         x = self.layer1(x)
-        x = self.layer2(x)
-        return x
+        return self.layer2(x)
 
     def training_step(self, batch, batch_idx):
         _, opt2 = self.optimizers()
diff --git a/tests/tests_pytorch/strategies/test_model_parallel_integration.py b/tests/tests_pytorch/strategies/test_model_parallel_integration.py
index 00600183f4293..4b3dbe9df9724 100644
--- a/tests/tests_pytorch/strategies/test_model_parallel_integration.py
+++ b/tests/tests_pytorch/strategies/test_model_parallel_integration.py
@@ -74,8 +74,7 @@ def _parallelize_feed_forward_fsdp2(model, device_mesh):
 
 def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
     model = _parallelize_feed_forward_tp(model, device_mesh)
-    model = _parallelize_feed_forward_fsdp2(model, device_mesh)
-    return model
+    return _parallelize_feed_forward_fsdp2(model, device_mesh)
 
 
 def _parallelize_with_compile(parallelize):
diff --git a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
index dd8042ecf2058..4a32418360aed 100644
--- a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
+++ b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
@@ -324,8 +324,7 @@ def __repr__(self):
 
     def __copy__(self):
         cls = self.__class__
-        new_obj = cls(self._store.copy())
-        return new_obj
+        return cls(self._store.copy())
 
     def copy(self):
         return self.__copy__()
diff --git a/tests/tests_pytorch/tuner/test_lr_finder.py b/tests/tests_pytorch/tuner/test_lr_finder.py
index 81352ebe256ef..ee8bedace5872 100644
--- a/tests/tests_pytorch/tuner/test_lr_finder.py
+++ b/tests/tests_pytorch/tuner/test_lr_finder.py
@@ -652,8 +652,7 @@ def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
         x, y = batch
         z = self.encoder(x)
         x_hat = self.decoder(z)
-        loss = F.mse_loss(x_hat, y)
-        return loss
+        return F.mse_loss(x_hat, y)
 
     def configure_optimizers(self):
         return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
diff --git a/tests/tests_pytorch/utilities/test_model_summary.py b/tests/tests_pytorch/utilities/test_model_summary.py
index ee6e064077f86..cb419c43cd556 100644
--- a/tests/tests_pytorch/utilities/test_model_summary.py
+++ b/tests/tests_pytorch/utilities/test_model_summary.py
@@ -81,8 +81,7 @@ def forward(self, x, y):
         out1 = self.layer1(x)
         out2 = self.layer2(y)
         out = self.relu(torch.cat((out1, out2), 1))
-        out = self.combine(out)
-        return out
+        return self.combine(out)
 
 
 class MixedDtypeModel(LightningModule):
diff --git a/tests/tests_pytorch/utilities/test_parameter_tying.py b/tests/tests_pytorch/utilities/test_parameter_tying.py
index e45fb39f81b34..e172dcef2faf1 100644
--- a/tests/tests_pytorch/utilities/test_parameter_tying.py
+++ b/tests/tests_pytorch/utilities/test_parameter_tying.py
@@ -30,8 +30,7 @@ def __init__(self):
     def forward(self, x):
         x = self.layer_1(x)
         x = self.layer_2(x)
-        x = self.layer_3(x)
-        return x
+        return self.layer_3(x)
 
 
 @pytest.mark.parametrize(
@@ -67,8 +66,7 @@ def __init__(self):
         def forward(self, x):
             x = self.net_a(x)
             x = self.layer_2(x)
-            x = self.net_b(x)
-            return x
+            return self.net_b(x)
 
     model = NestedModule()
     set_shared_parameters(model, [["layer.weight", "net_a.layer.weight", "net_b.layer.weight"]])
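
Note: all of the test-file changes above apply the same RET504 cleanup ("Unnecessary variable assignment before `return` statement"), which ruff now enforces for `tests/**` once the rule is dropped from the ignore list in pyproject.toml. A minimal sketch of the pattern follows; `Net` and `fc` are illustrative names only, not code from this diff:

    # Hypothetical module showing the RET504 fix applied throughout this diff.
    import torch
    from torch import nn

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)

        def forward(self, x):
            # Previously: x = self.fc(x); return x  -- flagged by RET504.
            # Now the expression is returned directly.
            return self.fc(x)

    out = Net()(torch.randn(1, 4))  # usage: returns a (1, 2) tensor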