1 parent 339b7e6 · commit a9d4d93
modelopt/torch/quantization/plugins/transformers_trainer.py
@@ -191,7 +191,8 @@ def _restore_modelopt_state_with_weights(self):
         modelopt_state = torch.load(self._modelopt_state_path, weights_only=False)
         modelopt_weights = modelopt_state.pop("modelopt_state_weights", None)
         restore_from_modelopt_state(self.model, modelopt_state)
-        set_quantizer_state_dict(self.model, modelopt_weights)
+        if modelopt_weights is not None:
+            set_quantizer_state_dict(self.model, modelopt_weights)
         print_rank_0("Restored modelopt state with weights.")

     def _quantize_model(self):
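For context, here is a minimal, self-contained sketch of the same guard pattern in plain PyTorch. It is not the ModelOpt implementation; the function name `_restore_state_with_weights`, the `"state_weights"` key, and the use of `load_state_dict` are hypothetical stand-ins for the helpers shown in the diff, chosen so the example runs on its own.

```python
# Sketch of the None-guard introduced by this commit, using plain PyTorch
# instead of the actual ModelOpt helpers. Names here are illustrative only.
import torch
import torch.nn as nn


def _restore_state_with_weights(model: nn.Module, state_path: str) -> None:
    # Load the serialized payload; weights_only=False because it may contain
    # arbitrary non-tensor metadata, as in the original code.
    state = torch.load(state_path, weights_only=False)

    # A checkpoint may not carry quantizer weights at all, so pop with a
    # default of None rather than assuming the key exists.
    weights = state.pop("state_weights", None)

    # (Restoring the structural/metadata part of the state would happen here.)

    # The fix: only push weights into the model when they were actually saved,
    # avoiding a failure on payloads that lack the weights entry.
    if weights is not None:
        model.load_state_dict(weights, strict=False)


if __name__ == "__main__":
    model = nn.Linear(4, 4)

    # Payload that includes weights: they are restored.
    torch.save({"state_weights": model.state_dict()}, "/tmp/demo_state.pt")
    _restore_state_with_weights(model, "/tmp/demo_state.pt")

    # Payload without weights: the guard skips the restore instead of failing.
    torch.save({}, "/tmp/demo_state_no_weights.pt")
    _restore_state_with_weights(model, "/tmp/demo_state_no_weights.pt")
```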