1 parent 4607dfc commit f529d43
modelopt/torch/quantization/plugins/transformers_trainer.py
@@ -194,8 +194,8 @@ def _restore_modelopt_state_with_weights(self):
 
     def _quantize_model(self):
         """Quantize the model. Restore the quantization state if it exists."""
-        num_samples = min(self.quant_args.calib_size, len(self.eval_dataset))  # type: ignore [union-attr]
         dataset = self.train_dataset if self.train_dataset is not None else self.eval_dataset
+        num_samples = min(self.quant_args.calib_size, len(dataset))  # type: ignore [union-attr]
         dataset = torch.utils.data.Subset(dataset, list(range(num_samples)))
         data_loader = self.get_eval_dataloader(dataset)
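The change reorders the calibration setup so that num_samples is capped by the dataset actually used for calibration (train_dataset when available, otherwise eval_dataset), rather than always by eval_dataset. Below is a minimal standalone sketch of that ordering, assuming nothing beyond what the diff shows: the helper name select_calibration_subset and the toy TensorDataset are invented for illustration, and the surrounding Trainer class and get_eval_dataloader plumbing from the real method are omitted.

# Sketch only: mirrors the patched ordering from _quantize_model, outside the
# Trainer class. select_calibration_subset and the toy dataset below are
# illustrative names, not part of the modelopt API.
import torch


def select_calibration_subset(train_dataset, eval_dataset, calib_size):
    """Choose the calibration dataset, then cap it at calib_size samples.

    The point of the fix: num_samples is computed from the chosen dataset,
    not from eval_dataset, so the cap is correct even when calibration data
    comes from the train split.
    """
    dataset = train_dataset if train_dataset is not None else eval_dataset
    num_samples = min(calib_size, len(dataset))
    return torch.utils.data.Subset(dataset, list(range(num_samples)))


if __name__ == "__main__":
    # Example: calibrate from a 1000-sample train split with no eval split.
    train = torch.utils.data.TensorDataset(torch.arange(1000, dtype=torch.float32))
    subset = select_calibration_subset(train, None, calib_size=512)
    print(len(subset))  # 512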