1 parent 26c52a5 commit 3a765bd
vllm/config.py
@@ -185,6 +185,11 @@ def _verify_cuda_graph(self) -> None:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
+        if self.quantization == "gptq" and not self.enforce_eager:
+            # Related issue: https://github.com/vllm-project/vllm/issues/2147
+            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
+                           "CUDA graph.")
+            self.enforce_eager = True
 
     def verify_with_parallel_config(
         self,
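
For readers skimming the commit: the change makes the config self-correct when GPTQ quantization is combined with CUDA graph capture, forcing eager execution instead. Below is a minimal, self-contained sketch of that behavior. The class name ModelConfigSketch and its reduced constructor are assumptions for illustration, not the actual ModelConfig signature in vllm/config.py; only the fields this commit touches are modeled.

import logging
from typing import Optional

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("vllm.config")


class ModelConfigSketch:
    """Reduced stand-in for vllm's ModelConfig (illustrative only)."""

    def __init__(self,
                 max_model_len: int,
                 max_context_len_to_capture: Optional[int] = None,
                 quantization: Optional[str] = None,
                 enforce_eager: bool = False) -> None:
        self.max_model_len = max_model_len
        self.max_context_len_to_capture = max_context_len_to_capture
        self.quantization = quantization
        self.enforce_eager = enforce_eager
        self._verify_cuda_graph()

    def _verify_cuda_graph(self) -> None:
        # Cap the capture length at the model's maximum context length.
        if self.max_context_len_to_capture is None:
            self.max_context_len_to_capture = self.max_model_len
        self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                              self.max_model_len)
        # New in this commit: GPTQ kernels do not support CUDA graph yet,
        # so fall back to eager mode rather than failing later.
        # Related issue: https://github.com/vllm-project/vllm/issues/2147
        if self.quantization == "gptq" and not self.enforce_eager:
            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
                           "CUDA graph.")
            self.enforce_eager = True


if __name__ == "__main__":
    # A GPTQ-quantized model requested without enforce_eager now silently
    # switches to eager execution instead of attempting CUDA graph capture.
    cfg = ModelConfigSketch(max_model_len=4096, quantization="gptq")
    print(cfg.enforce_eager)  # True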