Skip to content

Commit 048633a

Browse files
[pre-commit.ci] pre-commit suggestions (#2009)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <[email protected]>
1 parent b157e9c commit 048633a

24 files changed

+55
-60
lines changed

.pre-commit-config.yaml

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@ repos:
3737
- id: detect-private-key
3838

3939
- repo: https://github.com/codespell-project/codespell
40-
rev: v2.3.0
40+
rev: v2.4.1
4141
hooks:
4242
- id: codespell
4343
additional_dependencies: [tomli]
@@ -70,19 +70,19 @@ repos:
7070
args: ["--print-width=140"]
7171

7272
- repo: https://github.com/astral-sh/ruff-pre-commit
73-
rev: v0.8.6
73+
rev: v0.11.4
7474
hooks:
7575
- id: ruff
7676
args: ["--fix"]
7777
- id: ruff-format
7878
- id: ruff
7979

8080
- repo: https://github.com/tox-dev/pyproject-fmt
81-
rev: v2.5.0
81+
rev: v2.5.1
8282
hooks:
8383
- id: pyproject-fmt
8484
additional_dependencies: [tox]
8585
- repo: https://github.com/abravalheri/validate-pyproject
86-
rev: v0.23
86+
rev: v0.24.1
8787
hooks:
8888
- id: validate-pyproject

extensions/thunder/pretrain.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -256,7 +256,7 @@ def main(
256256
eval=eval,
257257
optimizer=optimizer,
258258
)
259-
fabric.print(f"Training time: {(time.perf_counter()-train_time):.2f}s")
259+
fabric.print(f"Training time: {(time.perf_counter() - train_time):.2f}s")
260260

261261
# Save final checkpoint
262262
save_checkpoint(fabric, state, tokenizer_dir, out_dir / "final" / "lit_model.pth")
@@ -364,7 +364,7 @@ def fit(
364364
if isinstance(val_loss, float):
365365
val_loss = f"{val_loss:.3f}"
366366
fabric.print(
367-
f"Epoch {metrics['epoch']+1} | iter {metrics['iter']} step {metrics['step']} |"
367+
f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
368368
f" loss train: {metrics['loss']:.3f},"
369369
f" val: {val_loss} |"
370370
f" iter time: {metrics['iter_time'] * 1000:.2f} ms"

extensions/thunder/unsloth/kernels/utils.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -26,7 +26,7 @@ def calculate_settings(n):
2626
BLOCK_SIZE = next_power_of_2(n)
2727
if BLOCK_SIZE > MAX_FUSED_SIZE:
2828
raise RuntimeError(
29-
f"Cannot launch Triton kernel since n = {n} exceeds " f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}."
29+
f"Cannot launch Triton kernel since n = {n} exceeds the maximum CUDA blocksize = {MAX_FUSED_SIZE}."
3030
)
3131
num_warps = 4
3232
if BLOCK_SIZE >= 32768:

extensions/xla/finetune/adapter.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -112,7 +112,7 @@ def main(fabric: L.Fabric, data_dir: Path, checkpoint_dir: Path, out_dir: Path)
112112

113113
train_time = time.perf_counter()
114114
train(fabric, model, optimizer, train_data, val_data, checkpoint_dir, out_dir)
115-
rank_print(fabric, f"Training time: {(time.perf_counter()-train_time):.2f}s")
115+
rank_print(fabric, f"Training time: {(time.perf_counter() - train_time):.2f}s")
116116

117117
# Save the final checkpoint at the end of training
118118
save_path = out_dir / "lit_model_adapter_finetuned.pth"

litgpt/api.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -677,7 +677,7 @@ def print_table(header, data):
677677
mean_str = "N/A"
678678
std_dev_str = "N/A"
679679

680-
markdown_table += f"| {key:<36} | {first_iteration:<15} | " f"{mean_str:<17} | {std_dev_str:<23} |\n"
680+
markdown_table += f"| {key:<36} | {first_iteration:<15} | {mean_str:<17} | {std_dev_str:<23} |\n"
681681
print(markdown_table)
682682

683683
import subprocess

litgpt/chat/base.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -114,8 +114,7 @@ def process_prompt(
114114
for block in model.transformer.h:
115115
block.attn.kv_cache.reset_parameters()
116116
fabric.print(
117-
f"\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec,"
118-
f" {tokens_generated} tokens",
117+
f"\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec, {tokens_generated} tokens",
119118
file=sys.stderr,
120119
)
121120
fabric.print()

litgpt/finetune/adapter.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -103,7 +103,7 @@ def setup(
103103
raise ValueError("Quantization and mixed precision is not supported.")
104104
if RequirementCache("bitsandbytes != 0.42.0"):
105105
warnings.warn(
106-
"LitGPT only supports bitsandbytes v0.42.0. " "This may result in errors when using quantization."
106+
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
107107
)
108108
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
109109
plugins = BitsandbytesPrecision(quantize[4:], dtype)

litgpt/finetune/adapter_v2.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -105,7 +105,7 @@ def setup(
105105
raise ValueError("Quantization and mixed precision is not supported.")
106106
if RequirementCache("bitsandbytes != 0.42.0"):
107107
warnings.warn(
108-
"LitGPT only supports bitsandbytes v0.42.0. " "This may result in errors when using quantization."
108+
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
109109
)
110110
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
111111
plugins = BitsandbytesPrecision(quantize[4:], dtype)

litgpt/finetune/full.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -307,7 +307,7 @@ def fit(
307307
if isinstance(val_loss, torch.Tensor):
308308
val_loss = f"{val_loss:.3f}"
309309
fabric.print(
310-
f"Epoch {metrics['epoch']+1} | iter {metrics['iter']} step {metrics['step']} |"
310+
f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
311311
f" loss train: {metrics['loss']:.3f},"
312312
f" val: {val_loss} |"
313313
f" iter time: {metrics['iter_time'] * 1000:.2f} ms"

litgpt/finetune/lora.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -133,7 +133,7 @@ def setup(
133133
raise ValueError("Quantization and mixed precision is not supported.")
134134
if RequirementCache("bitsandbytes != 0.42.0"):
135135
warnings.warn(
136-
"LitGPT only supports bitsandbytes v0.42.0. " "This may result in errors when using quantization."
136+
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
137137
)
138138
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
139139
plugins = BitsandbytesPrecision(quantize[4:], dtype)
@@ -366,7 +366,7 @@ def fit(
366366
if isinstance(val_loss, torch.Tensor):
367367
val_loss = f"{val_loss:.3f}"
368368
fabric.print(
369-
f"Epoch {metrics['epoch']+1} | iter {metrics['iter']} step {metrics['step']} |"
369+
f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
370370
f" loss train: {metrics['loss']:.3f},"
371371
f" val: {val_loss} |"
372372
f" iter time: {metrics['iter_time'] * 1000:.2f} ms"

0 commit comments

Comments (0)