 from datasets import load_dataset
 from peft import LoraConfig
 from qwen_vl_utils import process_vision_info
-from transformers import AutoModelForImageTextToText, AutoProcessor, BitsAndBytesConfig, Qwen2VLProcessor
+from transformers import AutoModelForImageTextToText, BitsAndBytesConfig, Qwen2VLProcessor
 
 from trl import ModelConfig, ScriptArguments, SFTConfig, SFTTrainer, TrlParser, get_kbit_device_map
 
@@ -224,10 +224,6 @@ class CustomScriptArguments(ScriptArguments):
     model.config.use_reentrant = False
     model.enable_input_require_grads()
 
-    processor = AutoProcessor.from_pretrained(
-        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
-    )
-
     # Prepare dataset
     prepared_dataset = [prepare_dataset(example, script_args.video_cache_dir) for example in dataset]
 
@@ -238,7 +234,6 @@ class CustomScriptArguments(ScriptArguments):
         train_dataset=prepared_dataset,
         data_collator=collate_fn,
         peft_config=peft_config,
-        processing_class=processor,
     )
 
     # Train model
@@ -248,8 +243,6 @@ class CustomScriptArguments(ScriptArguments):
     trainer.save_model(training_args.output_dir)
     if training_args.push_to_hub:
         trainer.push_to_hub(dataset_name=script_args.dataset_name)
-    if trainer.accelerator.is_main_process:
-        processor.push_to_hub(training_args.hub_model_id)
 
     # Cleanup
     del model