Skip to content

Commit 78d274d

Browse files
more cleanup
Signed-off-by: Brian Dellabetta <[email protected]>
1 parent 7cdd1cd commit 78d274d

File tree

1 file changed

+2
-11
lines changed
  • src/compressed_tensors/quantization/lifecycle

1 file changed

+2
-11
lines changed

src/compressed_tensors/quantization/lifecycle/apply.py

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,7 @@
3838
infer_quantization_status,
3939
is_kv_cache_quant_scheme,
4040
)
41-
from compressed_tensors.utils.helpers import (
42-
fix_fsdp_module_name,
43-
deprecated,
44-
replace_module,
45-
)
41+
from compressed_tensors.utils.helpers import deprecated, replace_module
4642
from compressed_tensors.utils.match import match_named_modules, match_targets
4743
from compressed_tensors.utils.offload import update_parameter_data
4844
from compressed_tensors.utils.safetensors_load import get_safetensors_folder
@@ -150,9 +146,6 @@ def apply_quantization_config(
150146
for name, submodule in match_named_modules(
151147
model, target_to_scheme, config.ignore, warn_on_fail=True
152148
):
153-
# potentially fix module name to remove FSDP wrapper prefix
154-
name = fix_fsdp_module_name(name)
155-
156149
# mark modules to be quantized by adding
157150
# quant scheme to the matching layers
158151
scheme = _scheme_from_targets(target_to_scheme, scheme.targets, name)
@@ -161,9 +154,7 @@ def apply_quantization_config(
161154
and config.format != CompressionFormat.dense.value
162155
and isinstance(submodule, torch.nn.Linear)
163156
):
164-
from compressed_tensors.linear.compressed_linear import (
165-
CompressedLinear,
166-
)
157+
from compressed_tensors.linear.compressed_linear import CompressedLinear
167158

168159
compressed_linear = CompressedLinear.from_linear(
169160
submodule,

0 commit comments

Comments
 (0)