Commit ab384f0

Author: Your Name
Commit message: minor
1 parent ecfc043 · commit ab384f0

File tree

2 files changed: +1 −1 lines changed


examples/llm_qat/README.md

Lines changed: 0 additions & 1 deletion
@@ -82,7 +82,6 @@ def forward_loop(model):
 
 
 # Quantize the model in-place; The model should be unwrapped from any distributed wrapper
-# The model may be wrapped in a DataParallel or DistributedDataParallel after `mtq.quantize`
 model = mtq.quantize(model, mtq.INT8_DEFAULT_CFG, forward_loop)
 
 # Save the modelopt quantizer states
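
For context, a minimal sketch of how this calibration hook is typically wired up; mtq.quantize and mtq.INT8_DEFAULT_CFG are taken from the README snippet above, while calib_dataloader is a hypothetical stand-in not part of the diff:

import modelopt.torch.quantization as mtq

# Hypothetical calibration loader: any small, representative batch source works.
calib_dataloader = ...  # assumption: defined by the user

def forward_loop(model):
    # Run a few calibration batches through the model so the quantizers
    # can observe activation ranges before quantization is finalized.
    for batch in calib_dataloader:
        model(**batch)

# Quantize in place on the unwrapped model; wrap in DataParallel/DDP only afterwards.
model = mtq.quantize(model, mtq.INT8_DEFAULT_CFG, forward_loop)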

modelopt/torch/quantization/plugins/transformers_trainer.py

Lines changed: 1 addition & 0 deletions
@@ -195,6 +195,7 @@ def _restore_modelopt_state_with_weights(self):
     def _quantize_model(self):
         """Quantize the model. Restore the quantization state if it exists."""
         dataset = self.train_dataset if self.train_dataset is not None else self.eval_dataset
+        assert dataset is not None, "Calibration requires either eval or train dataset."
         num_samples = min(self.quant_args.calib_size, len(dataset))  # type: ignore [union-attr]
         dataset = torch.utils.data.Subset(dataset, list(range(num_samples)))
         data_loader = self.get_eval_dataloader(dataset)
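
As a standalone illustration of what the new assert protects, here is a minimal sketch with hypothetical stand-ins for the trainer's train_dataset, eval_dataset, and quant_args.calib_size; only the assert line comes from the commit:

import torch

# Hypothetical stand-ins for the trainer attributes referenced in _quantize_model.
train_dataset = None
eval_dataset = torch.utils.data.TensorDataset(torch.randn(512, 8))
calib_size = 128

dataset = train_dataset if train_dataset is not None else eval_dataset
# The new guard: fail fast with a clear message instead of a TypeError from len(None)
# when neither dataset was provided.
assert dataset is not None, "Calibration requires either eval or train dataset."
num_samples = min(calib_size, len(dataset))
calib_subset = torch.utils.data.Subset(dataset, list(range(num_samples)))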
