Skip to content

Commit df9543c

Browse files
eshwarprasadS authored and RobotSail committed
typing fixes to accommodate API call args, linting...
Signed-off-by: eshwarprasadS <[email protected]>
1 parent a36c3aa commit df9543c

File tree

2 files changed

+10
-8
lines changed

2 files changed

+10
-8
lines changed

src/instructlab/eval/longbench.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -245,15 +245,15 @@ def run(
245245
"tokenizer": model_path,
246246
"base_url": base_url,
247247
}
248-
# Optionally add max_length if you want
248+
# Optionally add max_length
249249
if "max_length" in final_openai_config:
250-
model_args["max_length"] = final_openai_config["max_length"]
250+
model_args["max_length"] = str(final_openai_config["max_length"])
251251

252252
if api_key:
253-
model_args["api_key"] = api_key
253+
model_args["api_key"] = str(api_key)
254254

255255
# Add any other openai_config keys if needed
256-
# model_args.update(final_openai_config) # Only if you want to pass more
256+
# model_args.update(final_openai_config)
257257

258258
# Run evaluation
259259
results = simple_evaluate(
@@ -267,10 +267,11 @@ def run(
267267
# Prepare vLLM model args
268268
model_args = {
269269
"pretrained": model_path,
270-
"data_parallel_size": num_gpus,
270+
"data_parallel_size": str(num_gpus),
271271
}
272-
# Add vllm config properly
273-
model_args.update(final_vllm_config)
272+
# Add vllm config properly - convert all values to strings
273+
string_vllm_config = {k: str(v) for k, v in final_vllm_config.items()}
274+
model_args.update(string_vllm_config)
274275

275276
# Run evaluation
276277
results = simple_evaluate(

tests/test_project.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,10 @@
55
# First Party
66
from instructlab.eval.evaluator import Evaluator
77
from instructlab.eval.leaderboard import LeaderboardV2Evaluator
8+
from instructlab.eval.longbench import LongBenchEvaluator
89
from instructlab.eval.mmlu import MMLUBranchEvaluator, MMLUEvaluator
910
from instructlab.eval.mt_bench import MTBenchBranchEvaluator, MTBenchEvaluator
10-
from instructlab.eval.longbench import LongBenchEvaluator
11+
1112

1213
def test_evaluator_eps():
1314
expected = {

0 commit comments

Comments (0)