1 parent 3f79295 commit d57e3d6
lm_eval/models/vllm_causallms.py
@@ -216,9 +216,17 @@ def __init__(
         }
 
         if parse_version(version("vllm")) >= parse_version("0.9.0"):
-            kwargs_resolve_hf_chat_template["model_config"] = (
-                self.model.llm_engine.model_config
-            )
+            if self.data_parallel_size <= 1:
+                kwargs_resolve_hf_chat_template["model_config"] = (
+                    self.model.llm_engine.model_config
+                )
+            else:
+                from vllm.engine.arg_utils import EngineArgs
+
+                engine_args = EngineArgs(**self.model_args)
+                model_config = engine_args.create_model_config()
+
+                kwargs_resolve_hf_chat_template["model_config"] = model_config
 
         # https://github.com/vllm-project/vllm/pull/18259
         if (
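For context, here is a minimal standalone sketch of the logic this hunk produces. The helper name attach_model_config and its parameters are illustrative, not from the patch: llm, model_args, and data_parallel_size stand in for the self.model, self.model_args, and self.data_parallel_size attributes the diff uses. The apparent motivation is that with data parallelism the vLLM engine does not live in the driver process, so self.model.llm_engine.model_config is unreachable and an equivalent ModelConfig is rebuilt from the original engine arguments instead (EngineArgs and create_model_config() are taken directly from the diff).

# Illustrative sketch; assumes vllm >= 0.9.0 is installed.
from importlib.metadata import version

from packaging.version import parse as parse_version


def attach_model_config(kwargs_resolve_hf_chat_template, llm, model_args, data_parallel_size):
    """Populate the model_config kwarg that newer vllm chat-template resolution expects."""
    if parse_version(version("vllm")) < parse_version("0.9.0"):
        return  # older vllm signatures do not accept model_config

    if data_parallel_size <= 1:
        # Single in-process engine: read the live config off the LLM object.
        kwargs_resolve_hf_chat_template["model_config"] = llm.llm_engine.model_config
    else:
        # Data-parallel case: no engine exists in this process, so rebuild
        # an equivalent ModelConfig from the stored engine arguments.
        from vllm.engine.arg_utils import EngineArgs

        engine_args = EngineArgs(**model_args)
        kwargs_resolve_hf_chat_template["model_config"] = engine_args.create_model_config()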