8 changes: 0 additions & 8 deletions pyproject.toml
@@ -93,9 +93,6 @@ ignore = [
"S101", # todo: Use of `assert` detected
"S105", "S106", "S107", # todo: Possible hardcoded password: ...
"S113", # todo: Probable use of requests call without timeout
"S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
"S324", # todo: Probable use of insecure hash functions in `hashlib`
"S403", # todo: `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
"S404", # todo: `subprocess` module is possibly insecure
"S602", # todo: `subprocess` call with `shell=True` identified, security issue
"S603", # todo: `subprocess` call: check for execution of untrusted input
@@ -106,20 +103,15 @@ ignore = [
]
"tests/**" = [
"S101", # Use of `assert` detected
"S105", "S106", # todo: Possible hardcoded password: ...
"S301", # `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
"S113", # todo: Probable use of requests call without timeout
"S311", # todo: Standard pseudo-random generators are not suitable for cryptographic purposes
"S108", # todo: Probable insecure usage of temporary file or directory: "/tmp/sys-customizations-sync"
"S202", # Uses of `tarfile.extractall()`
"S403", # `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
"S404", # `subprocess` module is possibly insecure
"S602", # todo: `subprocess` call with `shell=True` identified, security issue
"S603", # todo: `subprocess` call: check for execution of untrusted input
"S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
"S607", # todo: Starting a process with a partial executable path
"RET504", # todo:Unnecessary variable assignment before `return` statement
"PT004", # todo: Fixture `tmpdir_unittest_fixture` does not return anything, add leading underscore
"PT012", # todo: `pytest.raises()` block should contain a single simple statement
"PT019", # todo: Fixture `_` without value is injected as parameter, use `@pytest.mark.usefixtures` instead
]
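For orientation, this is roughly the kind of test code those security-rule ignores were suppressing; once an entry such as S301 or S324 is dropped from the ignore list, ruff reports it again. A minimal sketch, assuming Ruff's documented behavior for these codes; the function names and the cache/digest scenario are invented for illustration and do not come from this repository.

```python
import hashlib
import pickle


def load_cache(raw: bytes):
    # S301: deserializing bytes with pickle is flagged as unsafe for untrusted input.
    return pickle.loads(raw)


def weak_digest(payload: bytes) -> str:
    # S324: MD5 is reported as an insecure hash function.
    return hashlib.md5(payload).hexdigest()
```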
3 changes: 1 addition & 2 deletions tests/parity_fabric/models.py
@@ -60,8 +60,7 @@ def forward(self, x):
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
-x = self.fc3(x)
-return x
+return self.fc3(x)

def get_optimizer(self):
return torch.optim.SGD(self.parameters(), lr=0.0001)
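Every code change below follows the same shape as this first hunk: a trailing temporary assignment followed by `return` is collapsed into a single `return` of the expression, the pattern Ruff's RET504 rule ("unnecessary assignment before return") targets. A minimal before/after sketch, with a made-up `TinyNet` module standing in for the test models touched in this PR:

```python
import torch
from torch import nn
from torch.nn import functional as F


class TinyNet(nn.Module):
    # Hypothetical module; layer names and sizes are for illustration only.
    def __init__(self) -> None:
        super().__init__()
        self.fc1 = nn.Linear(8, 8)
        self.fc3 = nn.Linear(8, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = F.relu(self.fc1(x))
        # Before: x = self.fc3(x); return x
        # After (as in this diff): return the expression directly.
        return self.fc3(x)


out = TinyNet()(torch.randn(4, 8))  # behavior is identical either way; shape (4, 2)
```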
@@ -83,8 +83,7 @@ def _parallelize_feed_forward_fsdp2(model, device_mesh):

def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
model = _parallelize_feed_forward_tp(model, device_mesh)
-model = _parallelize_feed_forward_fsdp2(model, device_mesh)
-return model
+return _parallelize_feed_forward_fsdp2(model, device_mesh)


@RunIf(min_torch="2.4", standalone=True, min_cuda_gpus=4)
6 changes: 2 additions & 4 deletions tests/tests_pytorch/accelerators/test_xla.py
@@ -46,8 +46,7 @@ def __init__(self):
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
-x = self.layer_3(x)
-return x
+return self.layer_3(x)


@RunIf(tpu=True, standalone=True)
@@ -230,8 +229,7 @@ def __init__(self):
def forward(self, x):
x = self.net_a(x)
x = self.layer_2(x)
-x = self.net_b(x)
-return x
+return self.net_b(x)


@RunIf(tpu=True)
6 changes: 2 additions & 4 deletions tests/tests_pytorch/callbacks/test_lr_monitor.py
@@ -428,8 +428,7 @@ def __init__(self):

def forward(self, x):
x = self.linear_a(x)
-x = self.linear_b(x)
-return x
+return self.linear_b(x)

def configure_optimizers(self):
param_groups = [
@@ -603,8 +602,7 @@ def __init__(self, lr, momentum):

def forward(self, x):
x = self.linear_a(x)
-x = self.linear_b(x)
-return x
+return self.linear_b(x)

def configure_optimizers(self):
param_groups = [
3 changes: 1 addition & 2 deletions tests/tests_pytorch/callbacks/test_spike.py
@@ -29,8 +29,7 @@ def training_step(self, batch, batch_idx: int):
if curr_loss_val is None:
curr_loss_val = batch_idx

-loss = self.layer(torch.tensor(curr_loss_val, device=self.device, dtype=self.dtype).view(1, 1))
-return loss
+return self.layer(torch.tensor(curr_loss_val, device=self.device, dtype=self.dtype).view(1, 1))

def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=1e-3)
@@ -266,8 +266,7 @@ def __init__(self):

def forward(self, x):
x = self.layer1(x)
-x = self.layer2(x)
-return x
+return self.layer2(x)

def configure_optimizers(self):
params = [{"params": self.layer1.parameters(), "lr": 0.1}, {"params": self.layer2.parameters(), "lr": 0.2}]
6 changes: 2 additions & 4 deletions tests/tests_pytorch/helpers/advanced_models.py
@@ -46,8 +46,7 @@ def block(in_feat, out_feat, normalize=True):

def forward(self, z):
img = self.model(z)
-img = img.view(img.size(0), *self.img_shape)
-return img
+return img.view(img.size(0), *self.img_shape)


class Discriminator(nn.Module):
@@ -204,8 +203,7 @@ def forward(self, x):
x = torch.tanh(x)
x = self.c_d1_bn(x)
x = self.c_d1_drop(x)
-x = self.c_d2(x)
-return x
+return self.c_d2(x)

def training_step(self, batch, batch_nb):
x, y = batch
3 changes: 1 addition & 2 deletions tests/tests_pytorch/helpers/simple_models.py
@@ -100,8 +100,7 @@ def forward(self, x):
x = self.layer_1a(x)
x = self.layer_2(x)
x = self.layer_2a(x)
-x = self.layer_end(x)
-return x
+return self.layer_end(x)

def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
3 changes: 1 addition & 2 deletions tests/tests_pytorch/models/test_hparams.py
@@ -417,8 +417,7 @@ def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if ".ckpt" in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
-raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
-return raw_checkpoint_path
+return os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)


@pytest.mark.parametrize("base_class", [HyperparametersMixin, LightningModule, LightningDataModule])
3 changes: 1 addition & 2 deletions tests/tests_pytorch/plugins/test_amp_plugins.py
@@ -165,8 +165,7 @@ def __init__(self):

def forward(self, x: Tensor):
x = self.layer1(x)
-x = self.layer2(x)
-return x
+return self.layer2(x)

def training_step(self, batch, batch_idx):
_, opt2 = self.optimizers()
@@ -74,8 +74,7 @@ def _parallelize_feed_forward_fsdp2(model, device_mesh):

def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
model = _parallelize_feed_forward_tp(model, device_mesh)
-model = _parallelize_feed_forward_fsdp2(model, device_mesh)
-return model
+return _parallelize_feed_forward_fsdp2(model, device_mesh)


def _parallelize_with_compile(parallelize):
@@ -324,8 +324,7 @@ def __repr__(self):

def __copy__(self):
cls = self.__class__
-new_obj = cls(self._store.copy())
-return new_obj
+return cls(self._store.copy())

def copy(self):
return self.__copy__()
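The `__copy__` hunk above is the only non-`forward` case of the same cleanup: the wrapper builds its copy from a shallow copy of the backing `_store` dict and now returns that new instance directly. A minimal self-contained sketch of the protocol, assuming a dict-backed wrapper; the `StoreDict` name is hypothetical, not the class in this diff:

```python
import copy


class StoreDict:
    # Hypothetical dict-backed wrapper, used only to illustrate the __copy__ protocol.
    def __init__(self, store=None):
        self._store = dict(store or {})

    def __copy__(self):
        cls = self.__class__
        # Return the new instance built from a shallow copy of the backing dict.
        return cls(self._store.copy())

    def copy(self):
        return self.__copy__()


original = StoreDict({"lr": 0.1})
clone = copy.copy(original)  # dispatches to __copy__
assert clone is not original and clone._store == original._store
```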
3 changes: 1 addition & 2 deletions tests/tests_pytorch/tuner/test_lr_finder.py
@@ -652,8 +652,7 @@ def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
x, y = batch
z = self.encoder(x)
x_hat = self.decoder(z)
-loss = F.mse_loss(x_hat, y)
-return loss
+return F.mse_loss(x_hat, y)

def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
3 changes: 1 addition & 2 deletions tests/tests_pytorch/utilities/test_model_summary.py
@@ -81,8 +81,7 @@ def forward(self, x, y):
out1 = self.layer1(x)
out2 = self.layer2(y)
out = self.relu(torch.cat((out1, out2), 1))
-out = self.combine(out)
-return out
+return self.combine(out)


class MixedDtypeModel(LightningModule):
6 changes: 2 additions & 4 deletions tests/tests_pytorch/utilities/test_parameter_tying.py
@@ -30,8 +30,7 @@ def __init__(self):
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
-x = self.layer_3(x)
-return x
+return self.layer_3(x)


@pytest.mark.parametrize(
@@ -67,8 +66,7 @@ def __init__(self):
def forward(self, x):
x = self.net_a(x)
x = self.layer_2(x)
-x = self.net_b(x)
-return x
+return self.net_b(x)

model = NestedModule()
set_shared_parameters(model, [["layer.weight", "net_a.layer.weight", "net_b.layer.weight"]])