Commit b9920bd

--unsafe-fixes
1 parent c11c392 commit b9920bd

186 files changed (+1039 / -1081 lines)


.actions/assistant.py

Lines changed: 5 additions & 5 deletions
@@ -22,7 +22,7 @@
 from itertools import chain
 from os.path import dirname, isfile
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Optional

 from packaging.requirements import Requirement
 from packaging.version import Version
@@ -128,7 +128,7 @@ def _parse_requirements(lines: Iterable[str]) -> Iterator[_RequirementWithCommen
 pip_argument = None


-def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str = "all") -> List[str]:
+def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str = "all") -> list[str]:
 """Loading requirements from a file.

 >>> path_req = os.path.join(_PROJECT_ROOT, "requirements")
@@ -223,7 +223,7 @@ def _load_aggregate_requirements(req_dir: str = "requirements", freeze_requireme
 fp.writelines([ln + os.linesep for ln in requires] + [os.linesep])


-def _retrieve_files(directory: str, *ext: str) -> List[str]:
+def _retrieve_files(directory: str, *ext: str) -> list[str]:
 all_files = []
 for root, _, files in os.walk(directory):
 for fname in files:
@@ -233,7 +233,7 @@ def _retrieve_files(directory: str, *ext: str) -> List[str]:
 return all_files


-def _replace_imports(lines: List[str], mapping: List[Tuple[str, str]], lightning_by: str = "") -> List[str]:
+def _replace_imports(lines: list[str], mapping: list[tuple[str, str]], lightning_by: str = "") -> list[str]:
 """Replace imports of standalone package to lightning.

 >>> lns = [
@@ -321,7 +321,7 @@ def copy_replace_imports(
 fo.writelines(lines)


-def create_mirror_package(source_dir: str, package_mapping: Dict[str, str]) -> None:
+def create_mirror_package(source_dir: str, package_mapping: dict[str, str]) -> None:
 """Create a mirror package with adjusted imports."""
 # replace imports and copy the code
 mapping = package_mapping.copy()
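
The change above is the pattern repeated throughout this commit: the deprecated typing.List / typing.Dict / typing.Tuple aliases are dropped in favor of the built-in list / dict / tuple generics, which are subscriptable in annotations on Python 3.9+ (PEP 585), while names that still live in typing (Optional, Any, Union, Callable) are kept. A minimal, self-contained sketch of the old and new spelling (illustrative only; the function names are hypothetical, not from the repository):

# Illustrative example of the annotation style adopted in this commit;
# the function names below are hypothetical and not taken from the repository.
from typing import Optional  # Optional/Any/Union/Callable still come from typing


# Old spelling (requires `from typing import Dict, List, Tuple`):
#   def top_scores(scores: Dict[str, float], k: int) -> List[Tuple[str, float]]: ...

# New spelling (built-in generics, valid on Python 3.9+ per PEP 585):
def top_scores(scores: dict[str, float], k: int = 3) -> list[tuple[str, float]]:
    """Return the k highest-scoring (name, score) pairs."""
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:k]


def first_or_none(items: list[str]) -> Optional[str]:
    """Only the container generics changed; Optional is untouched."""
    return items[0] if items else None


if __name__ == "__main__":
    print(top_scores({"a": 0.2, "b": 0.9, "c": 0.5}))  # [('b', 0.9), ('c', 0.5), ('a', 0.2)]
    print(first_or_none([]))  # None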

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ repos:
 hooks:
 # try to fix what is possible
 - id: ruff
-args: ["--fix"]
+args: ["--fix", "--unsafe-fixes"]
 # perform formatting updates
 - id: ruff-format
 # validate if all is fine with preview mode
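
This is the configuration change that gives the commit its title: the ruff pre-commit hook now passes "--unsafe-fixes" in addition to "--fix", so fixes that ruff classifies as unsafe (which here evidently include the typing-alias rewrites shown in the other files) are applied automatically rather than only reported. The same rewrites can presumably be reproduced locally with `ruff check --fix --unsafe-fixes` or by running the ruff hook through pre-commit.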

examples/fabric/build_your_own_trainer/trainer.py

Lines changed: 5 additions & 5 deletions
@@ -1,7 +1,7 @@
 import os
 from collections.abc import Iterable, Mapping
 from functools import partial
-from typing import Any, List, Literal, Optional, Tuple, Union, cast
+from typing import Any, Literal, Optional, Union, cast

 import lightning as L
 import torch
@@ -19,11 +19,11 @@ def __init__(
 self,
 accelerator: Union[str, Accelerator] = "auto",
 strategy: Union[str, Strategy] = "auto",
-devices: Union[List[int], str, int] = "auto",
+devices: Union[list[int], str, int] = "auto",
 precision: Union[str, int] = "32-true",
 plugins: Optional[Union[str, Any]] = None,
-callbacks: Optional[Union[List[Any], Any]] = None,
-loggers: Optional[Union[Logger, List[Logger]]] = None,
+callbacks: Optional[Union[list[Any], Any]] = None,
+loggers: Optional[Union[Logger, list[Logger]]] = None,
 max_epochs: Optional[int] = 1000,
 max_steps: Optional[int] = None,
 grad_accum_steps: int = 1,
@@ -465,7 +465,7 @@ def get_latest_checkpoint(checkpoint_dir: str) -> Optional[str]:

 def _parse_optimizers_schedulers(
 self, configure_optim_output
-) -> Tuple[
+) -> tuple[
 Optional[L.fabric.utilities.types.Optimizable],
 Optional[Mapping[str, Union[L.fabric.utilities.types.LRScheduler, bool, str, int]]],
 ]:

examples/fabric/reinforcement_learning/rl/agent.py

Lines changed: 9 additions & 10 deletions
@@ -1,5 +1,4 @@
 import math
-from typing import Dict, Tuple

 import gymnasium as gym
 import torch
@@ -43,7 +42,7 @@ def __init__(self, envs: gym.vector.SyncVectorEnv, act_fun: str = "relu", ortho_
 layer_init(torch.nn.Linear(64, envs.single_action_space.n), std=0.01, ortho_init=ortho_init),
 )

-def get_action(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor]:
+def get_action(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor]:
 logits = self.actor(x)
 distribution = Categorical(logits=logits)
 if action is None:
@@ -58,12 +57,12 @@ def get_greedy_action(self, x: Tensor) -> Tensor:
 def get_value(self, x: Tensor) -> Tensor:
 return self.critic(x)

-def get_action_and_value(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def get_action_and_value(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 action, log_prob, entropy = self.get_action(x, action)
 value = self.get_value(x)
 return action, log_prob, entropy, value

-def forward(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def forward(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 return self.get_action_and_value(x, action)

 @torch.no_grad()
@@ -77,7 +76,7 @@ def estimate_returns_and_advantages(
 num_steps: int,
 gamma: float,
 gae_lambda: float,
-) -> Tuple[Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
 next_value = self.get_value(next_obs).reshape(1, -1)
 advantages = torch.zeros_like(rewards)
 lastgaelam = 0
@@ -143,7 +142,7 @@ def __init__(
 self.avg_value_loss = MeanMetric(**torchmetrics_kwargs)
 self.avg_ent_loss = MeanMetric(**torchmetrics_kwargs)

-def get_action(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor]:
+def get_action(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor]:
 logits = self.actor(x)
 distribution = Categorical(logits=logits)
 if action is None:
@@ -158,12 +157,12 @@ def get_greedy_action(self, x: Tensor) -> Tensor:
 def get_value(self, x: Tensor) -> Tensor:
 return self.critic(x)

-def get_action_and_value(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def get_action_and_value(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 action, log_prob, entropy = self.get_action(x, action)
 value = self.get_value(x)
 return action, log_prob, entropy, value

-def forward(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def forward(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 return self.get_action_and_value(x, action)

 @torch.no_grad()
@@ -177,7 +176,7 @@ def estimate_returns_and_advantages(
 num_steps: int,
 gamma: float,
 gae_lambda: float,
-) -> Tuple[Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
 next_value = self.get_value(next_obs).reshape(1, -1)
 advantages = torch.zeros_like(rewards)
 lastgaelam = 0
@@ -193,7 +192,7 @@ def estimate_returns_and_advantages(
 returns = advantages + values
 return returns, advantages

-def training_step(self, batch: Dict[str, Tensor]):
+def training_step(self, batch: dict[str, Tensor]):
 # Get actions and values given the current observations
 _, newlogprob, entropy, newvalue = self(batch["obs"], batch["actions"].long())
 logratio = newlogprob - batch["logprobs"]

examples/fabric/reinforcement_learning/train_fabric.py

Lines changed: 1 addition & 2 deletions
@@ -21,7 +21,6 @@
 import os
 import time
 from datetime import datetime
-from typing import Dict

 import gymnasium as gym
 import torch
@@ -38,7 +37,7 @@ def train(
 fabric: Fabric,
 agent: PPOLightningAgent,
 optimizer: torch.optim.Optimizer,
-data: Dict[str, Tensor],
+data: dict[str, Tensor],
 global_step: int,
 args: argparse.Namespace,
 ):

examples/fabric/reinforcement_learning/train_torch.py

Lines changed: 1 addition & 2 deletions
@@ -22,7 +22,6 @@
 import random
 import time
 from datetime import datetime
-from typing import Dict

 import gymnasium as gym
 import torch
@@ -41,7 +40,7 @@
 def train(
 agent: PPOAgent,
 optimizer: torch.optim.Optimizer,
-data: Dict[str, Tensor],
+data: dict[str, Tensor],
 logger: SummaryWriter,
 global_step: int,
 args: argparse.Namespace,

examples/fabric/tensor_parallel/model.py

Lines changed: 2 additions & 2 deletions
@@ -9,7 +9,7 @@


 from dataclasses import dataclass
-from typing import Optional, Tuple
+from typing import Optional

 import torch
 import torch.nn.functional as F
@@ -87,7 +87,7 @@ def apply_rotary_emb(
 xq: torch.Tensor,
 xk: torch.Tensor,
 freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
 """Apply rotary embeddings to input tensors using the given frequency tensor.

 This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided

examples/pytorch/basics/autoencoder.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@
 """

 from os import path
-from typing import Optional, Tuple
+from typing import Optional

 import torch
 import torch.nn.functional as F
@@ -45,7 +45,7 @@ def __init__(
 nrow: int = 8,
 padding: int = 2,
 normalize: bool = True,
-value_range: Optional[Tuple[int, int]] = None,
+value_range: Optional[tuple[int, int]] = None,
 scale_each: bool = False,
 pad_value: int = 0,
 ) -> None:

examples/pytorch/domain_templates/reinforce_learn_Qnet.py

Lines changed: 5 additions & 6 deletions
@@ -36,7 +36,6 @@
 import random
 from collections import OrderedDict, deque, namedtuple
 from collections.abc import Iterator
-from typing import List, Tuple

 import gym
 import torch
@@ -103,7 +102,7 @@ def append(self, experience: Experience) -> None:
 """
 self.buffer.append(experience)

-def sample(self, batch_size: int) -> Tuple:
+def sample(self, batch_size: int) -> tuple:
 indices = random.sample(range(len(self.buffer)), batch_size)
 states, actions, rewards, dones, next_states = zip(*(self.buffer[idx] for idx in indices))

@@ -191,7 +190,7 @@ def get_action(self, net: nn.Module, epsilon: float, device: str) -> int:
 return action

 @torch.no_grad()
-def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = "cpu") -> Tuple[float, bool]:
+def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = "cpu") -> tuple[float, bool]:
 """Carries out a single interaction step between the agent and the environment.

 Args:
@@ -296,7 +295,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 """
 return self.net(x)

-def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
+def dqn_mse_loss(self, batch: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
 """Calculates the mse loss using a mini batch from the replay buffer.

 Args:
@@ -319,7 +318,7 @@ def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor

 return nn.MSELoss()(state_action_values, expected_state_action_values)

-def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:
+def training_step(self, batch: tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:
 """Carries out a single step through the environment to update the replay buffer. Then calculates loss based on
 the minibatch received.

@@ -357,7 +356,7 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> O

 return OrderedDict({"loss": loss, "log": log, "progress_bar": log})

-def configure_optimizers(self) -> List[Optimizer]:
+def configure_optimizers(self) -> list[Optimizer]:
 """Initialize Adam optimizer."""
 optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
 return [optimizer]

examples/pytorch/domain_templates/reinforce_learn_ppo.py

Lines changed: 8 additions & 8 deletions
@@ -31,7 +31,7 @@

 import argparse
 from collections.abc import Iterator
-from typing import Callable, List, Tuple
+from typing import Callable

 import gym
 import torch
@@ -42,7 +42,7 @@
 from torch.utils.data import DataLoader, IterableDataset


-def create_mlp(input_shape: Tuple[int], n_actions: int, hidden_size: int = 128):
+def create_mlp(input_shape: tuple[int], n_actions: int, hidden_size: int = 128):
 """Simple Multi-Layer Perceptron network."""
 return nn.Sequential(
 nn.Linear(input_shape[0], hidden_size),
@@ -228,7 +228,7 @@ def __init__(

 self.state = torch.FloatTensor(self.env.reset())

-def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
 """Passes in a state x through the network and returns the policy and a sampled action.

 Args:
@@ -243,7 +243,7 @@ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Te

 return pi, action, value

-def discount_rewards(self, rewards: List[float], discount: float) -> List[float]:
+def discount_rewards(self, rewards: list[float], discount: float) -> list[float]:
 """Calculate the discounted rewards of all rewards in list.

 Args:
@@ -264,7 +264,7 @@ def discount_rewards(self, rewards: List[float], discount: float) -> List[float]

 return list(reversed(cumul_reward))

-def calc_advantage(self, rewards: List[float], values: List[float], last_value: float) -> List[float]:
+def calc_advantage(self, rewards: list[float], values: list[float], last_value: float) -> list[float]:
 """Calculate the advantage given rewards, state values, and the last value of episode.

 Args:
@@ -282,7 +282,7 @@ def calc_advantage(self, rewards: List[float], values: List[float], last_value:
 delta = [rews[i] + self.gamma * vals[i + 1] - vals[i] for i in range(len(rews) - 1)]
 return self.discount_rewards(delta, self.gamma * self.lam)

-def generate_trajectory_samples(self) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
+def generate_trajectory_samples(self) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]:
 """
 Contains the logic for generating trajectory data to train policy and value network
 Yield:
@@ -376,7 +376,7 @@ def critic_loss(self, state, action, logp_old, qval, adv) -> torch.Tensor:
 value = self.critic(state)
 return (qval - value).pow(2).mean()

-def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]):
+def training_step(self, batch: tuple[torch.Tensor, torch.Tensor]):
 """Carries out a single update to actor and critic network from a batch of replay buffer.

 Args:
@@ -407,7 +407,7 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]):
 self.log("loss_critic", loss_critic, on_step=False, on_epoch=True, prog_bar=False, logger=True)
 self.log("loss_actor", loss_actor, on_step=False, on_epoch=True, prog_bar=True, logger=True)

-def configure_optimizers(self) -> List[Optimizer]:
+def configure_optimizers(self) -> list[Optimizer]:
 """Initialize Adam optimizer."""
 optimizer_actor = torch.optim.Adam(self.actor.parameters(), lr=self.lr_actor)
 optimizer_critic = torch.optim.Adam(self.critic.parameters(), lr=self.lr_critic)

0 commit comments
