Skip to content

Commit c49a94b

Browse files
committed
update
1 parent 926c2bc commit c49a94b

File tree

3 files changed

+3
-3
lines changed

3 files changed

+3
-3
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,7 @@ def decompress(self, model_path: str, model: Module):
608608
self.sparsity_compressor is not None
609609
and self.sparsity_config.format != CompressionFormat.dense.value
610610
):
611-
# note - decompress only support one compressor so far
611+
# note - decompress only supports one compressor atm
612612
quant_compressor = next(iter(self.quantization_compressor))
613613
params_to_ignore = None
614614
if self.quantization_compressor is not None:

src/compressed_tensors/quantization/quant_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ def to_dict(self):
162162

163163
@staticmethod
164164
def from_pretrained(
165-
model: Module, format: Optional[Union[List[str], str]] = None
165+
model: Module, format: Optional[str] = None
166166
) -> Optional["QuantizationConfig"]:
167167
"""
168168
Converts a model into its associated QuantizationConfig based on the

tests/test_compressors/model_compressors/test_model_compressor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ def _get_combined_config(s_config, q_config):
395395
)
396396
def test_compress_model(model_stub, q_format, s_config, tmpdir):
397397
model = AutoModelForCausalLM.from_pretrained(model_stub, torch_dtype=torch.float32)
398-
compressor = ModelCompressor.from_pretrained_model(model, s_config, q_format)
398+
compressor = ModelCompressor.from_pretrained_model(model, s_config, [q_format])
399399

400400
# compress model by eagerly compressing state dict
401401
true_compressed = dict(compressor.compress(model))

0 commit comments

Comments (0)