We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
modules_to_not_convert
1 parent e0c1575 — commit c5e8330 (Copy full SHA for c5e8330)
vllm/model_executor/layers/quantization/fbgemm_fp8.py
@@ -31,7 +31,7 @@ class FBGEMMFp8Config(QuantizationConfig):
31
"""Config class for FBGEMM Fp8."""
32
33
def __init__(self, ignore_list: List[str], input_scale_ub: float):
34
- self.ignore_list = ignore_list
+ self.ignore_list = ignore_list if ignore_list else []
35
self.input_scale_ub = input_scale_ub
36
37
# For GPUs that lack FP8 hardware support, we can leverage the Marlin
0 commit comments