
Commit 4582feb

[1/N] Tiny execute Ruff auto lint (#991)
1 parent aa7e66a commit 4582feb

17 files changed: +23 −23 lines changed

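
Every hunk in this commit is the same mechanical fix: Ruff's rule F541 (f-string without any placeholders) flags f-string literals that contain no {...} substitutions, and its auto-fix drops the inert f prefix, leaving the runtime value unchanged. A minimal sketch of the before/after pattern, reusing a flag string from one of the hunks below (the variable name flags is illustrative, not from this repo):

flags = f"--rollout-max-response-len 256 "  # flagged by F541: no placeholders, so the f prefix does nothing
flags = "--rollout-max-response-len 256 "   # after the auto-fix: a plain literal with exactly the same value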

examples/formal_math/single_round/run.py

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ def execute():

     sglang_args = (
         # "--rollout-num-gpus-per-engine 2 "
-        f"--rollout-num-gpus-per-engine 8 "  # temp use 1 engine per node to avoid flashinfer err
+        "--rollout-num-gpus-per-engine 8 "  # temp use 1 engine per node to avoid flashinfer err
         "--sglang-mem-fraction-static 0.7 "
     )

examples/search-r1/generate_with_search.py

Lines changed: 2 additions & 2 deletions
@@ -134,7 +134,7 @@ async def execute_predictions(prediction: str) -> str:
         next_obs = ""
         done = True
     else:
-        next_obs = f"\nMy previous action is invalid. \
+        next_obs = "\nMy previous action is invalid. \
 If I want to search, I should put the query between <search> and </search>. \
 If I want to give the final answer, I should put the answer between <answer> and </answer>. Let me try again.\n"
         done = False
@@ -143,7 +143,7 @@ async def execute_predictions(prediction: str) -> str:


 async def generate(args, sample: Sample, sampling_params) -> Sample:
-    assert not args.partial_rollout, f"Partial rollout is not supported for this function at the moment."
+    assert not args.partial_rollout, "Partial rollout is not supported for this function at the moment."

     state = GenerateState(args)

examples/search-r1/qa_em_format.py

Lines changed: 1 addition & 1 deletion
@@ -180,7 +180,7 @@ def compute_score_em(
     do_print = random.randint(1, 64) == 1

     if do_print:
-        print(f"--------------------------------")
+        print("--------------------------------")
         print(f"Golden answers: {ground_truth['target']}")
         print(f"Extracted answer: {answer}")
         print(f"Solution string: {solution_str}")

scripts/run_deepseek.py

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ def train(args: ScriptArgs):
         "--prompt-data /root/datasets/gsm8k/train.parquet "
         "--input-key messages "
         # Deliberately make it very short for this easy task
-        f"--rollout-max-response-len 256 "
+        "--rollout-max-response-len 256 "
     )
     eval_args += (
         "--eval-prompt-data gsm8k /root/datasets/gsm8k/test.parquet "

scripts/run_glm45_355b_a32b.py

Lines changed: 2 additions & 2 deletions
@@ -166,7 +166,7 @@ def train(args: ScriptArgs):
         "--prompt-data /root/datasets/gsm8k/train.parquet "
         "--input-key messages "
         # Deliberately make it very short for this easy task
-        f"--rollout-max-response-len 256 "
+        "--rollout-max-response-len 256 "
     )
     eval_args += (
         "--eval-prompt-data gsm8k /root/datasets/gsm8k/test.parquet "
@@ -179,7 +179,7 @@ def train(args: ScriptArgs):
     perf_args = (
         "--tensor-model-parallel-size 4 "
         "--sequence-parallel "
-        f"--pipeline-model-parallel-size 1 "
+        "--pipeline-model-parallel-size 1 "
         "--context-parallel-size 1 "
         "--expert-model-parallel-size 4 "
         "--expert-tensor-parallel-size 1 "

scripts/run_qwen3_4b.py

Lines changed: 3 additions & 3 deletions
@@ -160,7 +160,7 @@ def execute(args: ScriptArgs):
         "--adam-beta2 0.98 "
     )

-    sglang_args = f"--rollout-num-gpus-per-engine 1 " "--sglang-chunked-prefill-size 4096 "
+    sglang_args = "--rollout-num-gpus-per-engine 1 " "--sglang-chunked-prefill-size 4096 "

     match args.train_backend:
         case "fsdp":
@@ -171,7 +171,7 @@ def execute(args: ScriptArgs):
                 f"--update-weight-buffer-size {512 * 1024 * 1024} "  # 512MB
                 """--train-env-vars '{"PYTORCH_CUDA_ALLOC_CONF":"expandable_segments:True"}' """
             )
-            sglang_args += f"--sglang-mem-fraction-static 0.75 "
+            sglang_args += "--sglang-mem-fraction-static 0.75 "
             perf_args = "--use-dynamic-batch-size " "--max-tokens-per-gpu 32768 "

         case "megatron":
@@ -196,7 +196,7 @@ def execute(args: ScriptArgs):
                 "--train-memory-margin-bytes 3221225472 "
             )
             # TODO improve
-            sglang_args += f"--sglang-mem-fraction-static 0.7 "
+            sglang_args += "--sglang-mem-fraction-static 0.7 "
             perf_args = "--use-dynamic-batch-size " "--max-tokens-per-gpu 9216 "

         case _:
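
A side note on the first hunk above: the right-hand side of sglang_args is two adjacent string literals, which Python concatenates at compile time, so dropping the f prefix from the first literal cannot change the final value. A minimal illustration (variable names hypothetical):

a = "--x 1 " "--y 2 "  # adjacent literals are implicitly concatenated
b = "--x 1 --y 2 "
assert a == b          # identical at runtime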

slime/backends/fsdp_utils/models/qwen3_moe.py

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ def fused_experts_impl(
 ):
     padded_size = 0

-    assert hidden_states.shape[1] == w1.shape[2] - padded_size, f"Hidden size mismatch"
+    assert hidden_states.shape[1] == w1.shape[2] - padded_size, "Hidden size mismatch"
     assert topk_weights.shape == topk_ids.shape, "topk shape mismatch"
     assert hidden_states.is_contiguous(), "Hidden_states must be contiguous"
     assert w1.is_contiguous(), "Expert weights1 must be contiguous"

slime/backends/megatron_utils/arguments.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def set_default_megatron_args(args):
     args.padded_vocab_size = _vocab_size_with_padding(args.vocab_size, args)

     if not args.tokenizer_model and not args.tokenizer_type:
-        logger.info(f"--tokenizer-model not set, use --hf-checkpoint as tokenizer model.")
+        logger.info("--tokenizer-model not set, use --hf-checkpoint as tokenizer model.")
         args.tokenizer_model = args.hf_checkpoint
         args.tokenizer_type = "HuggingFaceTokenizer"
     return args

slime/ray/rollout.py

Lines changed: 2 additions & 2 deletions
@@ -522,7 +522,7 @@ def _log_rollout_data(rollout_id, args, samples, rollout_extra_metrics, rollout_
     if args.rollout_num_gpus:
         log_dict["perf/tokens_per_gpu_per_sec"] = sum(response_lengths) / rollout_time / args.rollout_num_gpus
         log_dict["perf/longest_sample_tokens_per_sec"] = max(response_lengths) / rollout_time
-    log_dict |= dict_add_prefix(_compute_metrics_from_samples(args, samples), f"rollout/")
+    log_dict |= dict_add_prefix(_compute_metrics_from_samples(args, samples), "rollout/")
     logger.info(f"perf {rollout_id}: {log_dict}")
     step = compute_rollout_step(args, rollout_id)
     log_dict["rollout/step"] = step
@@ -533,7 +533,7 @@ def _compute_metrics_from_samples(args, samples):
     response_lengths = [sample.effective_response_length for sample in samples]

     log_dict = {}
-    log_dict |= dict_add_prefix(compute_statistics(response_lengths), f"response_len/")
+    log_dict |= dict_add_prefix(compute_statistics(response_lengths), "response_len/")
     log_dict |= _compute_zero_std_metrics(args, samples)
     log_dict |= _compute_spec_metrics(args, samples)
     log_dict |= _compute_reward_cat_metrics(args, samples)

slime/ray/train_actor.py

Lines changed: 2 additions & 2 deletions
@@ -81,7 +81,7 @@ def init(self, args, role, with_ref=False):

         try:
             if torch.version.hip is not None:
-                logger.info(f"Detected ROCm/HIP environment, skipping NUMA affinity setup")
+                logger.info("Detected ROCm/HIP environment, skipping NUMA affinity setup")
                 # will find the coresponding API to implement ROCm version as below
             else:
                 import pynvml
@@ -97,7 +97,7 @@ def init(self, args, role, with_ref=False):
             pynvml.nvmlShutdown()

         except ImportError:
-            logger.info(f"Warning: pynvml not available, skipping NUMA affinity setup")
+            logger.info("Warning: pynvml not available, skipping NUMA affinity setup")
         except Exception as e:
             logger.info(f"Warning: Failed to set NUMA affinity: {e}")

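For readers wanting to reproduce a pass like this locally, something along the lines of ruff check --select F541 --fix . should apply the same fix (a sketch: it assumes Ruff is installed and narrows the run to rule F541; the exact command and configuration used for this commit are not recorded on this page).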