src/compressed_tensors/quantization/lifecycle
1 file changed: +1 -12 lines changed

@@ -213,18 +213,7 @@ def apply_quantization_status(module: Module, status: QuantizationStatus):
 
     force_zero_point_init = status != QuantizationStatus.COMPRESSED
 
-    # When decompressing, we set the scale_dtype as the model's dtype
-    # This is because the normal workflow of using the weight's dtype
-    # will be incorrect as the model weight will be compressed
-    # Therefore, use the dtype set by the user using the PretrainedModel
-    scale_dtype = None
-    if status == QuantizationStatus.FROZEN:
-        if hasattr(module, "dtype"):
-            scale_dtype = module.dtype
-
-    initialize_module_for_quantization(
-        module, force_zero_point=force_zero_point_init, scale_dtype=scale_dtype
-    )
+    initialize_module_for_quantization(module, force_zero_point=force_zero_point_init)
 
 
     module.quantization_status = status
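For context, here is a minimal standalone sketch of the control flow after this change. It is not the library source: the enum member list and both function bodies are stand-ins assumed from the diff above, kept only to show that the FROZEN-specific scale_dtype handling is gone and force_zero_point is the sole remaining knob.

from enum import Enum
from torch.nn import Module

class QuantizationStatus(Enum):
    # Member list assumed from the diff; only these two appear in the hunk.
    FROZEN = "frozen"
    COMPRESSED = "compressed"

def initialize_module_for_quantization(module: Module, force_zero_point: bool = True):
    # Stand-in for the real initializer. After this PR it is no longer passed
    # a scale_dtype argument, so scales keep the initializer's default dtype.
    module.force_zero_point = force_zero_point

def apply_quantization_status(module: Module, status: QuantizationStatus):
    # Zero points are force-initialized for every status except COMPRESSED.
    force_zero_point_init = status != QuantizationStatus.COMPRESSED
    # The removed FROZEN branch no longer overrides scale_dtype with module.dtype.
    initialize_module_for_quantization(module, force_zero_point=force_zero_point_init)
    module.quantization_status = status

layer = Module()
apply_quantization_status(layer, QuantizationStatus.FROZEN)
assert layer.force_zero_point is True
assert layer.quantization_status == QuantizationStatus.FROZEN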