diff --git a/src/llmcompressor/transformers/finetune/session_mixin.py b/src/llmcompressor/transformers/finetune/session_mixin.py
index e344705d7..fcb2d3dfb 100644
--- a/src/llmcompressor/transformers/finetune/session_mixin.py
+++ b/src/llmcompressor/transformers/finetune/session_mixin.py
@@ -399,13 +399,12 @@ def save_model(
         # is True by default to avoid high runtime cost
         self.save_state()
         if self.accelerator.is_main_process:
-            processor = getattr(self, "processing_class", self.tokenizer)
             # TODO: need to port over all saving parameters so that all
             # checkpoints are saved in the same way
             save_checkpoint(
                 output_dir,
                 model=self.model,
-                processor=processor,
+                processor=self.processing_class,
                 save_safetensors=self.args.save_safetensors,
                 save_compressed=self.model_args.save_compressed,
                 skip_sparsity_compression_stats=skip_sparsity_compression_stats,