@@ -245,15 +245,15 @@ def run(
             "tokenizer": model_path,
             "base_url": base_url,
         }
-        # Optionally add max_length if you want
+        # Optionally add max_length
         if "max_length" in final_openai_config:
-            model_args["max_length"] = final_openai_config["max_length"]
+            model_args["max_length"] = str(final_openai_config["max_length"])

         if api_key:
-            model_args["api_key"] = api_key
+            model_args["api_key"] = str(api_key)

         # Add any other openai_config keys if needed
-        # model_args.update(final_openai_config)  # Only if you want to pass more
+        # model_args.update(final_openai_config)

         # Run evaluation
         results = simple_evaluate(
@@ -267,10 +267,11 @@ def run(
         # Prepare vLLM model args
         model_args = {
             "pretrained": model_path,
-            "data_parallel_size": num_gpus,
+            "data_parallel_size": str(num_gpus),
         }
-        # Add vllm config properly
-        model_args.update(final_vllm_config)
+        # Add vllm config properly - convert all values to strings
+        string_vllm_config = {k: str(v) for k, v in final_vllm_config.items()}
+        model_args.update(string_vllm_config)

         # Run evaluation
         results = simple_evaluate(
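Note (context, not part of the commit): keeping every model_args value a string matches the "key=value,key=value" form that lm-evaluation-harness accepts for --model_args and simple_evaluate. A minimal sketch of flattening such a dict, with placeholder values; the helper name is hypothetical:

    # Hypothetical helper: flatten an already-stringified model_args dict
    # into lm-eval's "key=value,key=value" argument string.
    def to_arg_string(model_args: dict) -> str:
        return ",".join(f"{k}={v}" for k, v in model_args.items())

    # Example with placeholder values:
    # to_arg_string({"pretrained": "/path/to/model", "data_parallel_size": "2"})
    # -> "pretrained=/path/to/model,data_parallel_size=2"

Newer releases of simple_evaluate also accept a plain dict for model_args, so whether this flattening step is needed depends on the lm-eval version in use.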