File tree Expand file tree Collapse file tree 1 file changed +6
-2
lines changed
src/compressed_tensors/quantization/lifecycle Expand file tree Collapse file tree 1 file changed +6
-2
lines changed Original file line number Diff line number Diff line change @@ -107,8 +107,8 @@ def load_pretrained_quantization(model: Module, model_name_or_path: str):
107
107
108
108
109
109
def apply_quantization_config(
110
- model: Module, config: QuantizationConfig, run_compressed: bool = False
111
- ) -> Dict:
110
+ model: Module, config: Union[QuantizationConfig, None], run_compressed: bool = False
111
+ ) -> OrderedDict:
112
112
"""
113
113
Initializes the model for quantization in-place based on the given config
114
114
@@ -117,6 +117,10 @@ def apply_quantization_config(
117
117
:param run_compressed: Whether the model will be run in compressed mode or
118
118
decompressed fully on load
119
119
"""
120
+ # Workaround for when HF Quantizer passes None, see PR #180
121
+ if config is None:
122
+ return OrderedDict()
123
+
120
124
# remove reference to the original `config`
121
125
# argument. This function can mutate it, and we'd
122
126
# like to keep the original `config` as it is.
You can’t perform that action at this time.
0 commit comments