Commit 273650f

pre-commit-ci[bot] authored and lantiga committed
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent e29a8b1 commit 273650f
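
The diffs below are mechanical style fixes applied by the configured hooks: hugging a sole list or dict argument against the call parentheses, adding trailing commas, and inserting a blank line after module docstrings. The hook configuration itself is not part of this commit, so the snippet below is only a toy illustration of the bracket-hugging rewrite, with hypothetical names, showing that it is formatting-only:

def process(items):  # hypothetical stand-in, not from the commit
    return [x * 2 for x in items]

# Before the hooks run: the sole list argument opens on its own line.
before = process(
    [
        1,
        2,
    ]
)

# After: the bracket hugs the call parenthesis, saving one indent level.
after = process([
    1,
    2,
])

assert before == after  # the rewrite is formatting-only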

176 files changed: +821 −943 lines


examples/app/dag/app.py

Lines changed: 3 additions & 3 deletions
@@ -65,9 +65,9 @@ def __init__(self, models_paths: list):
         )

         # Step 3: Create the work to train the models_paths in parallel.
-        self.dict = Dict(
-            **{model_path.split(".")[-1]: ModelWork(model_path, parallel=True) for model_path in models_paths}
-        )
+        self.dict = Dict(**{
+            model_path.split(".")[-1]: ModelWork(model_path, parallel=True) for model_path in models_paths
+        })

         # Step 4: Some element to track components progress.
         self.has_completed = False

examples/app/server/app.py

Lines changed: 5 additions & 7 deletions
@@ -20,13 +20,11 @@ def setup(self):
     def predict(self, request):
         image = base64.b64decode(request.image.encode("utf-8"))
         image = Image.open(io.BytesIO(image))
-        transforms = torchvision.transforms.Compose(
-            [
-                torchvision.transforms.Resize(224),
-                torchvision.transforms.ToTensor(),
-                torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-            ]
-        )
+        transforms = torchvision.transforms.Compose([
+            torchvision.transforms.Resize(224),
+            torchvision.transforms.ToTensor(),
+            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
         image = transforms(image)
         image = image.to(self._device)
         prediction = self._model(image.unsqueeze(0))
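
For context, the pipeline being reformatted here is standard ImageNet preprocessing for the ResNet served by this example. A minimal standalone sketch, assuming an RGB input image (the filename is hypothetical):

import torchvision
from PIL import Image

transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize(224),  # resize the shorter side to 224 px
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # ImageNet stats
])

image = Image.open("sample.jpg").convert("RGB")  # hypothetical input file
batch = transforms(image).unsqueeze(0)  # add a batch dimension, as predict() does
print(batch.shape)  # e.g. torch.Size([1, 3, 224, 299]); Resize(224) fixes only the shorter side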

examples/app/server_with_auto_scaler/app.py

Lines changed: 5 additions & 7 deletions
@@ -34,13 +34,11 @@ def setup(self):
         self._model = torchvision.models.resnet18(pretrained=True).to(self._device)

     def predict(self, requests: BatchRequestModel):
-        transforms = torchvision.transforms.Compose(
-            [
-                torchvision.transforms.Resize(224),
-                torchvision.transforms.ToTensor(),
-                torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-            ]
-        )
+        transforms = torchvision.transforms.Compose([
+            torchvision.transforms.Resize(224),
+            torchvision.transforms.ToTensor(),
+            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        ])
         images = []
         for request in requests.inputs:
             image = app.components.serve.types.image.Image.deserialize(request.image)

examples/fabric/dcgan/train_fabric.py

Lines changed: 8 additions & 9 deletions
@@ -4,6 +4,7 @@
 Code adapted from the official PyTorch DCGAN tutorial:
 https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
 """
+
 import os
 import time
 from pathlib import Path

@@ -55,14 +56,12 @@ def main():
         root=dataroot,
         split="all",
         download=True,
-        transform=transforms.Compose(
-            [
-                transforms.Resize(image_size),
-                transforms.CenterCrop(image_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        ),
+        transform=transforms.Compose([
+            transforms.Resize(image_size),
+            transforms.CenterCrop(image_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ]),
     )

     # Create the dataloader

@@ -227,7 +226,7 @@ def __init__(self):
             nn.ReLU(True),
             # state size. (ngf) x 32 x 32
             nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
+            nn.Tanh(),
             # state size. (nc) x 64 x 64
         )
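
Beyond the bracket changes, the hook adds a trailing comma after nn.Tanh(). With Black-style formatting, a trailing ("magic") comma keeps a call expanded one element per line, which preserves the per-layer shape comments; that rationale is an inference, not stated in the commit. A self-contained sketch of the generator's tail:

import torch
from torch import nn

ngf, nc = 64, 3  # generator feature maps and image channels, as in the DCGAN examples

tail = nn.Sequential(
    nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
    nn.Tanh(),  # trailing comma keeps the formatter from collapsing the list
)

x = torch.randn(1, ngf, 32, 32)
print(tail(x).shape)  # torch.Size([1, 3, 64, 64]), matching the "(nc) x 64 x 64" comment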

examples/fabric/dcgan/train_torch.py

Lines changed: 8 additions & 9 deletions
@@ -4,6 +4,7 @@
 Code adapted from the official PyTorch DCGAN tutorial:
 https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
 """
+
 import os
 import random
 import time

@@ -55,14 +56,12 @@ def main():
         root=dataroot,
         split="all",
         download=True,
-        transform=transforms.Compose(
-            [
-                transforms.Resize(image_size),
-                transforms.CenterCrop(image_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        ),
+        transform=transforms.Compose([
+            transforms.Resize(image_size),
+            transforms.CenterCrop(image_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ]),
     )

     # Create the dataloader

@@ -236,7 +235,7 @@ def __init__(self):
             nn.ReLU(True),
             # state size. (ngf) x 32 x 32
             nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
+            nn.Tanh(),
             # state size. (nc) x 64 x 64
         )

examples/fabric/meta_learning/train_fabric.py

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
 Run it with:
     lightning run model train_fabric.py --accelerator=cuda --devices=2 --strategy=ddp
 """
+
 import cherry
 import learn2learn as l2l
 import torch

examples/fabric/meta_learning/train_torch.py

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@
 Run it with:
     torchrun --nproc_per_node=2 --standalone train_torch.py
 """
+
 import os
 import random

examples/fabric/reinforcement_learning/train_fabric.py

Lines changed: 4 additions & 8 deletions
@@ -84,14 +84,10 @@ def main(args: argparse.Namespace):
     )

     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [
-            make_env(
-                args.env_id, args.seed + rank * args.num_envs + i, rank, args.capture_video, logger.log_dir, "train"
-            )
-            for i in range(args.num_envs)
-        ]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(args.env_id, args.seed + rank * args.num_envs + i, rank, args.capture_video, logger.log_dir, "train")
+        for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

     # Define the agent and the optimizer and setup them with Fabric
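
The hugged form still passes SyncVectorEnv what it expects: a list of zero-argument callables, one per sub-environment. A minimal sketch using gymnasium's vector API (the examples alias their gym import as gym; CartPole-v1 and the simplified make_env below are stand-ins for the example's own helpers):

import gymnasium as gym

def make_env(env_id: str, seed: int):
    # Return a thunk so each sub-environment is built lazily by the vector env.
    def thunk():
        env = gym.make(env_id)
        env.action_space.seed(seed)
        return env
    return thunk

num_envs = 4
envs = gym.vector.SyncVectorEnv([
    make_env("CartPole-v1", seed=i) for i in range(num_envs)
])

obs, info = envs.reset(seed=0)
print(obs.shape)  # (4, 4): one 4-dimensional observation per sub-environment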

examples/fabric/reinforcement_learning/train_fabric_decoupled.py

Lines changed: 3 additions & 3 deletions
@@ -59,9 +59,9 @@ def player(args, world_collective: TorchCollective, player_trainer_collective: T
     )

     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [make_env(args.env_id, args.seed + i, 0, args.capture_video, log_dir, "train") for i in range(args.num_envs)]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(args.env_id, args.seed + i, 0, args.capture_video, log_dir, "train") for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

     # Define the agent

examples/fabric/reinforcement_learning/train_torch.py

Lines changed: 11 additions & 13 deletions
@@ -142,19 +142,17 @@ def main(args: argparse.Namespace):
     )

     # Environment setup
-    envs = gym.vector.SyncVectorEnv(
-        [
-            make_env(
-                args.env_id,
-                args.seed + global_rank * args.num_envs + i,
-                global_rank,
-                args.capture_video,
-                logger.log_dir if global_rank == 0 else None,
-                "train",
-            )
-            for i in range(args.num_envs)
-        ]
-    )
+    envs = gym.vector.SyncVectorEnv([
+        make_env(
+            args.env_id,
+            args.seed + global_rank * args.num_envs + i,
+            global_rank,
+            args.capture_video,
+            logger.log_dir if global_rank == 0 else None,
+            "train",
+        )
+        for i in range(args.num_envs)
+    ])
     assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

     # Define the agent and the optimizer and setup them with DistributedDataParallel
