src/lighteval/models/vllm/vllm_model.py
8 additions & 0 deletions
@@ -81,6 +81,8 @@ class VLLMModelConfig(ModelConfig):
     pipeline_parallel_size: PositiveInt = 1  # how many GPUs to use for pipeline parallelism
     gpu_memory_utilization: NonNegativeFloat = 0.9  # lower this if you are running out of memory
     max_model_length: PositiveInt | None = None  # maximum length of the model, usually inferred automatically. Reduce this if you encounter OOM issues; 4096 is usually enough
+    quantization: str | None = None  # quantization method (e.g. "awq", "gptq"), passed through to vLLM
+    load_format: str | None = None  # weight loading format (e.g. "safetensors"), passed through to vLLM
     swap_space: PositiveInt = 4  # CPU swap space size (GiB) per GPU.
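
For context, here is a minimal sketch of how the two new fields might be set. The field names come from the diff above and the import path follows the file location; the `model_name` field and the example checkpoint are assumptions about the surrounding lighteval API, not something this diff shows.

```python
from lighteval.models.vllm.vllm_model import VLLMModelConfig

# Usage sketch only: "model_name" is an assumed field of the config class
# and may differ in the actual lighteval API; the other fields appear in
# the diff above.
config = VLLMModelConfig(
    model_name="TheBloke/Llama-2-7B-AWQ",  # hypothetical quantized checkpoint
    quantization="awq",          # quantization method understood by vLLM
    load_format="safetensors",   # weight format passed through to vLLM
    gpu_memory_utilization=0.8,  # below the 0.9 default to leave headroom
)
```

Both fields mirror arguments of the same names on vLLM's engine, so leaving them as `None` defers to vLLM's own defaults.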