Skip to content

Commit 74f0fd8

Browse files
authored
try pyupgrade-up py38 (#1999)
1 parent 6693b4b commit 74f0fd8

File tree

15 files changed

+23
-25
lines changed

15 files changed

+23
-25
lines changed

extensions/xla/scripts/prepare_alpaca.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,15 +33,15 @@ def prepare(
3333
which stores the preprocessed and tokenized prompts and labels.
3434
"""
3535
if max_seq_length is None:
36-
with open(checkpoint_dir / "model_config.yaml", "r", encoding="utf-8") as file:
36+
with open(checkpoint_dir / "model_config.yaml", encoding="utf-8") as file:
3737
config = yaml.safe_load(file)
3838
max_seq_length = config["block_size"]
3939

4040
destination_path.mkdir(parents=True, exist_ok=True)
4141
data_file_path = destination_path / data_file_name
4242
print("Loading data file...")
4343
download_if_missing(data_file_path, data_file_url)
44-
with open(data_file_path, "r", encoding="utf-8") as file:
44+
with open(data_file_path, encoding="utf-8") as file:
4545
data = json.load(file)
4646

4747
print("Loading tokenizer...")

litgpt/data/alpaca.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def prepare_data(self) -> None:
6363
download_if_missing(self.download_dir / self.file_name, self.file_url)
6464

6565
def setup(self, stage: str = "") -> None:
66-
with open(self.download_dir / self.file_name, "r", encoding="utf-8") as file:
66+
with open(self.download_dir / self.file_name, encoding="utf-8") as file:
6767
data = json.load(file)
6868

6969
# Partition the dataset into train and test

litgpt/data/flan.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ def _dataloader(self, split: str) -> DataLoader:
107107

108108
def load_jsonl(filename: Path) -> List[Dict[str, str]]:
109109
data = []
110-
with open(filename, "r", encoding="utf-8") as f:
110+
with open(filename, encoding="utf-8") as f:
111111
for line in f:
112112
data.append(json.loads(line))
113113
return data

litgpt/data/json_data.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -139,10 +139,10 @@ def find_split(self, split_name: str) -> Optional[Path]:
139139

140140
def load_split(json_path: Path) -> Any:
141141
if json_path.suffix == ".json":
142-
with open(json_path, "r", encoding="utf-8") as file:
142+
with open(json_path, encoding="utf-8") as file:
143143
return json.load(file)
144144
if json_path.suffix == ".jsonl":
145-
with open(json_path, "r", encoding="utf-8") as file:
145+
with open(json_path, encoding="utf-8") as file:
146146
return [json.loads(line) for line in file]
147147
else:
148148
raise ValueError(f"Unsupported file format: {json_path.suffix}. Expected `.json` or `.jsonl`.")

litgpt/data/longform.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def val_dataloader(self):
6363
return self._dataloader("val")
6464

6565
def _dataloader(self, split: str) -> DataLoader:
66-
with open(self.download_dir / f"{split}.json", "r", encoding="utf-8") as file:
66+
with open(self.download_dir / f"{split}.json", encoding="utf-8") as file:
6767
data = json.load(file)
6868

6969
dataset = SFTDataset(

litgpt/data/text_files.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ def val_dataloader(self) -> DataLoader:
131131

132132

133133
def tokenize(filename: str, tokenizer: Tokenizer):
134-
with open(filename, "r", encoding="utf-8") as file:
134+
with open(filename, encoding="utf-8") as file:
135135
text = file.read()
136136
text = text.strip()
137137
yield tokenizer.encode(text, bos=True, eos=False)

litgpt/data/tinystories.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ def val_dataloader(self) -> DataLoader:
104104

105105

106106
def tokenize(filename: str, tokenizer: Tokenizer):
107-
with open(filename, "r", encoding="utf-8") as f:
107+
with open(filename, encoding="utf-8") as f:
108108
data = json.load(f)
109109
global_rank = int(os.environ["DATA_OPTIMIZER_GLOBAL_RANK"])
110110
num_workers = int(os.environ["DATA_OPTIMIZER_NUM_WORKERS"])

litgpt/generate/sequentially.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from functools import partial
1212
from pathlib import Path
1313
from pprint import pprint
14-
from typing import Literal, Optional
14+
from typing import Literal, Optional, Type
1515

1616
import lightning as L
1717
import torch
@@ -20,7 +20,6 @@
2020
from lightning.fabric.utilities.init import _materialize_meta_tensors
2121
from lightning_utilities.core.imports import RequirementCache
2222
from tqdm import tqdm
23-
from typing_extensions import Type
2423

2524
import litgpt.generate.base as generate_base
2625
from litgpt.config import Config

litgpt/prompts.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -479,7 +479,7 @@ def save_prompt_style(style: Union[str, PromptStyle], checkpoint_dir: Path) -> N
479479

480480

481481
def load_prompt_style(checkpoint_dir: Path) -> PromptStyle:
482-
with open(checkpoint_dir / "prompt_style.yaml", "r", encoding="utf-8") as file:
482+
with open(checkpoint_dir / "prompt_style.yaml", encoding="utf-8") as file:
483483
config = yaml.safe_load(file)
484484
# Support loading the full module path for user-defined prompt classes
485485
full_module_path, cls_name = config["class_path"].rsplit(".", 1)

litgpt/scripts/merge_lora.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def load_lora_metadata(checkpoint_dir: Path) -> Tuple[Dict[str, Any], Path, Opti
9292
f" the `litgpt/finetune/lora.py` script."
9393
)
9494

95-
with open(hparams_file, "r", encoding="utf-8") as file:
95+
with open(hparams_file, encoding="utf-8") as file:
9696
hparams = yaml.safe_load(file)
9797

9898
lora_params = {k: v for k, v in hparams.items() if k.startswith("lora_")}

0 commit comments

Comments (0)