@@ -63,9 +63,6 @@ def vllm(
6363 num_fewshot_seeds : Annotated [
6464 int , Option (help = "Number of seeds to use for few-shot evaluation." , rich_help_panel = HELP_PANEL_NAME_1 )
6565 ] = 1 ,
66- load_responses_from_details_date_id : Annotated [
67- Optional [str ], Option (help = "Load responses from details directory." , rich_help_panel = HELP_PANEL_NAME_1 )
68- ] = None ,
6966 # === saving ===
7067 output_dir : Annotated [
7168 str , Option (help = "Output directory for evaluation results." , rich_help_panel = HELP_PANEL_NAME_2 )
@@ -127,18 +124,18 @@ def vllm(
127124 max_samples = max_samples ,
128125 use_chat_template = use_chat_template ,
129126 system_prompt = system_prompt ,
130- load_responses_from_details_date_id = load_responses_from_details_date_id ,
131127 )
132128
133129 if model_args .endswith (".yaml" ):
134130 with open (model_args , "r" ) as f :
135131 config = yaml .safe_load (f )["model" ]
132+ model_args = config ["base_params" ]["model_args" ]
136133 generation_parameters = GenerationParameters .from_dict (config )
137- model_config = VLLMModelConfig (config , generation_parameters = generation_parameters )
138-
139134 else :
140- model_args_dict : dict = {k .split ("=" )[0 ]: k .split ("=" )[1 ] if "=" in k else True for k in model_args .split ("," )}
141- model_config = VLLMModelConfig (** model_args_dict )
135+ generation_parameters = GenerationParameters ()
136+
137+ model_args_dict : dict = {k .split ("=" )[0 ]: k .split ("=" )[1 ] if "=" in k else True for k in model_args .split ("," )}
138+ model_config = VLLMModelConfig (** model_args_dict , generation_parameters = generation_parameters )
142139
143140 pipeline = Pipeline (
144141 tasks = tasks ,