Skip to content

Commit f829eb2

Browse files
authored
[2/N] Tiny manual fix for Ruff default ruleset and add to pre-commit (#992)
1 parent 4582feb commit f829eb2

27 files changed

+51
-40
lines changed

.pre-commit-config.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,12 @@ repos:
1717
args: ['--maxkb=1000']
1818
- id: requirements-txt-fixer
1919

20+
- repo: https://github.com/astral-sh/ruff-pre-commit
21+
rev: v0.14.7
22+
hooks:
23+
- id: ruff-check
24+
args: [ --fix ]
25+
2026
- repo: https://github.com/PyCQA/autoflake
2127
rev: v2.0.2
2228
hooks:

examples/eval/eval_delegate_rollout.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010

1111
from slime.rollout.base_types import RolloutFnEvalOutput, RolloutFnTrainOutput
1212
from slime.rollout.sglang_rollout import generate_rollout as base_generate_rollout
13-
from slime.utils.metric_utils import compute_rollout_step
1413

1514
logger = logging.getLogger(__name__)
1615

@@ -83,7 +82,6 @@ def _log_delegate_metrics(args, rollout_id: int, metrics: dict | None, raw_respo
8382
if raw_response is not None:
8483
logger.info("External eval raw response for rollout %s: %s", rollout_id, raw_response)
8584
logger.info("eval %s (external): %s", rollout_id, flattened)
86-
step = compute_rollout_step(args, rollout_id)
8785
return flattened
8886

8987

examples/fully_async/fully_async_rollout.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import asyncio
2+
import atexit
23
import queue
34
import threading
45
import time
@@ -259,6 +260,5 @@ def generate_rollout_fully_async(args, rollout_id, data_buffer, evaluation=False
259260

260261

261262
# Register exit cleanup function
262-
import atexit
263263

264264
atexit.register(stop_global_worker)

examples/search-r1/local_dense_retriever/retrieval_server.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,7 @@ def retrieve_endpoint(request: QueryRequest):
360360
scores = []
361361
try:
362362
results, scores = tmp
363-
except:
363+
except ValueError:
364364
results = tmp
365365

366366
# Format response

pyproject.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,9 @@ line_length = 119
2424

2525
[tool.ruff]
2626
line-length = 119
27+
ignore = [
28+
"E402",
29+
]
2730

2831
[tool.pytest.ini_options]
2932
# durations=0 will display all tests execution time, sorted in ascending order starting from the slowest one.

slime/backends/fsdp_utils/actor.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,8 @@ def _get_init_weight_context_manager(self):
217217
# Check if model uses tied word embeddings (which doesn't work with meta tensors)
218218
use_meta_tensor = not self.hf_config.tie_word_embeddings
219219

220-
cpu_init_weights = lambda: torch.device("cpu")
220+
def cpu_init_weights():
221+
return torch.device("cpu")
221222

222223
if use_meta_tensor:
223224
# Rank 0: CPU, others: meta device (memory efficient for large models)

slime/backends/megatron_utils/megatron_to_hf/deepseekv3.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ def convert_deepseekv3_to_hf(args, name, param):
1515

1616
try:
1717
head_dim = args.kv_channels if args.kv_channels is not None else args.hidden_size // args.num_attention_heads
18-
except:
18+
except AttributeError:
1919
head_dim = args.hidden_size // args.num_attention_heads
2020
value_num_per_group = args.num_attention_heads // args.num_query_groups
2121

slime/backends/megatron_utils/megatron_to_hf/glm4.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def convert_glm4_to_hf(args, name, param):
1212

1313
try:
1414
head_dim = args.kv_channels if args.kv_channels is not None else args.hidden_size // args.num_attention_heads
15-
except:
15+
except AttributeError:
1616
head_dim = args.hidden_size // args.num_attention_heads
1717
value_num_per_group = args.num_attention_heads // args.num_query_groups
1818

slime/backends/megatron_utils/megatron_to_hf/glm4moe.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ def convert_glm4moe_to_hf(args, name, param):
1515

1616
try:
1717
head_dim = args.kv_channels if args.kv_channels is not None else args.hidden_size // args.num_attention_heads
18-
except:
18+
except AttributeError:
1919
head_dim = args.hidden_size // args.num_attention_heads
2020
value_num_per_group = args.num_attention_heads // args.num_query_groups
2121

slime/backends/megatron_utils/megatron_to_hf/llama.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def convert_llama_to_hf(args, name, param):
1212

1313
try:
1414
head_dim = args.kv_channels if args.kv_channels is not None else args.hidden_size // args.num_attention_heads
15-
except:
15+
except AttributeError:
1616
head_dim = args.hidden_size // args.num_attention_heads
1717
value_num_per_group = args.num_attention_heads // args.num_query_groups
1818

0 commit comments

Comments
 (0)