Skip to content

Commit 246d711

Browse files
committed
update
1 parent 30ae05c commit 246d711

File tree

3 files changed

+3
-3
lines changed

3 files changed

+3
-3
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -623,7 +623,7 @@ def decompress(self, model_path: str, model: Module):
623623
self.sparsity_compressor is not None
624624
and self.sparsity_config.format != CompressionFormat.dense.value
625625
):
626-
# note - decompress only support one compressor so far
626+
# note - decompress only supports one compressor atm
627627
quant_compressor = next(iter(self.quantization_compressor))
628628
params_to_ignore = None
629629
if self.quantization_compressor is not None:

src/compressed_tensors/quantization/quant_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def to_dict(self):
165165

166166
@staticmethod
167167
def from_pretrained(
168-
model: Module, format: Optional[Union[List[str], str]] = None
168+
model: Module, format: Optional[str] = None
169169
) -> Optional["QuantizationConfig"]:
170170
"""
171171
Converts a model into its associated QuantizationConfig based on the

tests/test_compressors/model_compressors/test_model_compressor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ def _get_combined_config(s_config, q_config):
395395
)
396396
def test_compress_model(model_stub, q_format, s_config, tmpdir):
397397
model = AutoModelForCausalLM.from_pretrained(model_stub, torch_dtype=torch.float32)
398-
compressor = ModelCompressor.from_pretrained_model(model, s_config, q_format)
398+
compressor = ModelCompressor.from_pretrained_model(model, s_config, [q_format])
399399

400400
# compress model by eagerly compressing state dict
401401
true_compressed = dict(compressor.compress(model))

0 commit comments

Comments (0)