     infer_quantization_status,
     is_kv_cache_quant_scheme,
 )
-from compressed_tensors.utils.helpers import (
-    fix_fsdp_module_name,
-    deprecated,
-    replace_module,
-)
+from compressed_tensors.utils.helpers import deprecated, replace_module
 from compressed_tensors.utils.match import match_named_modules, match_targets
 from compressed_tensors.utils.offload import update_parameter_data
 from compressed_tensors.utils.safetensors_load import get_safetensors_folder
@@ -150,9 +146,6 @@ def apply_quantization_config(
     for name, submodule in match_named_modules(
         model, target_to_scheme, config.ignore, warn_on_fail=True
     ):
-        # potentially fix module name to remove FSDP wrapper prefix
-        name = fix_fsdp_module_name(name)
-
         # mark modules to be quantized by adding
         # quant scheme to the matching layers
         scheme = _scheme_from_targets(target_to_scheme, scheme.targets, name)
@@ -161,9 +154,7 @@ def apply_quantization_config(
             and config.format != CompressionFormat.dense.value
             and isinstance(submodule, torch.nn.Linear)
         ):
-            from compressed_tensors.linear.compressed_linear import (
-                CompressedLinear,
-            )
+            from compressed_tensors.linear.compressed_linear import CompressedLinear

             compressed_linear = CompressedLinear.from_linear(
                 submodule,
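
Below is a minimal, self-contained sketch of the pattern these hunks touch: iterate matched submodules, attach a quantization scheme, and swap eligible Linear layers for a compressed variant. It is illustrative only; TinyCompressedLinear, apply_toy_config, and the scheme dict are hypothetical stand-ins, not the library's CompressedLinear, match_named_modules, or apply_quantization_config.

import torch


class TinyCompressedLinear(torch.nn.Linear):
    """Hypothetical stand-in for CompressedLinear, used only for illustration."""

    @classmethod
    def from_linear(cls, linear, scheme):
        new = cls(linear.in_features, linear.out_features, bias=linear.bias is not None)
        new.load_state_dict(linear.state_dict())
        # Mirror of the diff attaching a quantization scheme to the matched layer.
        new.quantization_scheme = scheme
        return new


def apply_toy_config(model, scheme):
    # Analogous to the match_named_modules loop in apply_quantization_config:
    # walk named submodules and replace each plain Linear with the compressed variant.
    for name, submodule in list(model.named_modules()):
        if isinstance(submodule, torch.nn.Linear) and not isinstance(
            submodule, TinyCompressedLinear
        ):
            parent_name, _, child_name = name.rpartition(".")
            parent = model.get_submodule(parent_name) if parent_name else model
            setattr(parent, child_name, TinyCompressedLinear.from_linear(submodule, scheme))


model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
apply_toy_config(model, {"weights": "int8"})
print(model)  # submodule 0 is now TinyCompressedLinear, 1 stays ReLU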