From 1800beb13f407ddb881d0af936860643e84ba085 Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Fri, 3 Oct 2025 15:39:19 +0000 Subject: [PATCH 1/8] dep --- ISSUES.md | 2 +- docs/source/ar/run_scripts.md | 26 ----------------- docs/source/ar/trainer.md | 2 -- docs/source/de/run_scripts.md | 26 ----------------- docs/source/en/deepspeed.md | 4 +-- docs/source/en/run_scripts.md | 7 +---- docs/source/en/trainer.md | 3 +- docs/source/es/run_scripts.md | 26 ----------------- docs/source/es/trainer.md | 2 -- docs/source/fr/run_scripts_fr.md | 28 +------------------ docs/source/it/run_scripts.md | 26 ----------------- docs/source/ja/main_classes/deepspeed.md | 6 ++-- docs/source/ja/main_classes/trainer.md | 3 -- docs/source/ja/run_scripts.md | 27 ------------------ docs/source/ko/deepspeed.md | 4 +-- docs/source/ko/perf_train_special.md | 3 +- docs/source/ko/run_scripts.md | 26 ----------------- docs/source/ko/trainer.md | 2 -- docs/source/pt/run_scripts.md | 26 ----------------- docs/source/zh/main_classes/deepspeed.md | 6 ++-- docs/source/zh/main_classes/trainer.md | 3 -- docs/source/zh/perf_train_special.md | 1 - docs/source/zh/run_scripts.md | 27 ------------------ .../multiple_choice/run_multiple_choice.py | 11 -------- .../legacy/question-answering/run_squad.py | 15 ---------- .../question-answering/run_squad_trainer.py | 11 -------- examples/legacy/run_language_modeling.py | 10 ------- examples/legacy/run_swag.py | 15 ---------- examples/legacy/seq2seq/finetune_trainer.py | 3 -- .../seq2seq/train_distil_marian_enro.sh | 2 +- .../seq2seq/train_distil_marian_enro_tpu.sh | 2 +- .../legacy/seq2seq/train_distilbart_cnn.sh | 2 +- .../legacy/seq2seq/train_mbart_cc25_enro.sh | 2 +- examples/legacy/seq2seq/utils.py | 24 ---------------- .../legacy/token-classification/run_ner.py | 11 -------- examples/pytorch/README.md | 3 +- .../pytorch/audio-classification/README.md | 2 -- .../run_audio_classification.py | 18 ------------ .../pytorch/contrastive-image-text/README.md | 1 - .../contrastive-image-text/run_clip.py | 17 ----------- .../run_image_classification.py | 17 ----------- examples/pytorch/image-pretraining/README.md | 2 -- examples/pytorch/image-pretraining/run_mae.py | 17 ----------- examples/pytorch/image-pretraining/run_mim.py | 17 ----------- .../run_instance_segmentation.py | 12 -------- examples/pytorch/language-modeling/run_clm.py | 17 ----------- examples/pytorch/language-modeling/run_fim.py | 17 ----------- examples/pytorch/language-modeling/run_mlm.py | 17 ----------- examples/pytorch/language-modeling/run_plm.py | 17 ----------- examples/pytorch/multiple-choice/run_swag.py | 17 ----------- .../object-detection/run_object_detection.py | 17 ----------- examples/pytorch/old_test_xla_examples.py | 1 - examples/pytorch/question-answering/run_qa.py | 17 ----------- .../question-answering/run_qa_beam_search.py | 17 ----------- .../question-answering/run_seq2seq_qa.py | 17 ----------- .../run_semantic_segmentation.py | 17 ----------- examples/pytorch/speech-recognition/README.md | 7 ----- .../run_speech_recognition_ctc.py | 19 +------------ .../run_speech_recognition_ctc_adapter.py | 19 +------------ .../run_speech_recognition_seq2seq.py | 17 ----------- examples/pytorch/summarization/README.md | 2 -- .../summarization/run_summarization.py | 17 ----------- examples/pytorch/test_pytorch_examples.py | 17 ----------- .../text-classification/run_classification.py | 17 ----------- .../pytorch/text-classification/run_glue.py | 17 ----------- .../pytorch/text-classification/run_xnli.py | 17 ----------- 
.../pytorch/token-classification/run_ner.py | 17 ----------- examples/pytorch/translation/README.md | 5 ---- .../pytorch/translation/run_translation.py | 17 ----------- .../integrations/integration_utils.py | 2 +- src/transformers/training_args.py | 6 +--- .../run_{{cookiecutter.example_shortcut}}.py | 23 ++------------- tests/deepspeed/test_deepspeed.py | 3 -- tests/deepspeed/test_model_zoo.py | 1 - tests/extended/test_trainer_ext.py | 1 - tests/fsdp/test_fsdp.py | 1 - tests/sagemaker/conftest.py | 1 - .../pytorch/run_glue_model_parallelism.py | 19 +------------ tests/trainer/test_trainer.py | 1 - 79 files changed, 27 insertions(+), 890 deletions(-) diff --git a/ISSUES.md b/ISSUES.md index c87bd9fc2c3f..77de2998ad0b 100644 --- a/ISSUES.md +++ b/ISSUES.md @@ -153,7 +153,7 @@ You are not required to read the following guidelines before opening an issue. H cd examples/seq2seq torchrun --nproc_per_node=2 ./finetune_trainer.py \ --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \ - --output_dir output_dir --overwrite_output_dir \ + --output_dir output_dir \ --do_train --n_train 500 --num_train_epochs 1 \ --per_device_train_batch_size 1 --freeze_embeds \ --src_lang en_XX --tgt_lang ro_RO --task translation \ diff --git a/docs/source/ar/run_scripts.md b/docs/source/ar/run_scripts.md index 238844dc055e..052e6e1d8440 100644 --- a/docs/source/ar/run_scripts.md +++ b/docs/source/ar/run_scripts.md @@ -93,7 +93,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -117,7 +116,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -140,7 +138,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -197,7 +194,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -225,7 +221,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -239,25 +234,6 @@ examples/pytorch/summarization/run_summarization.py -h خيار آخر مفيد لتمكينه هو استئناف التدريب من نقطة تفتيش سابقة. سيضمن ذلك أنك تستطيع الاستمرار من حيث توقفت دون البدء من جديد إذا تم مقاطعة تدريبك. هناك طريقتان لاستئناف التدريب من نقطة تفتيش. -تستخدم الطريقة الأولى المعلمة `output_dir previous_output_dir` لاستئناف التدريب من أحدث نقطة تفتيش مخزنة في `output_dir`. 
في هذه الحالة، يجب عليك إزالة `overwrite_output_dir`: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -تستخدم الطريقة الثانية معلمة `resume_from_checkpoint path_to_specific_checkpoint` لاستئناف التدريب من مجلد نقطة تفتيش محددة. - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -269,7 +245,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -301,6 +276,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ar/trainer.md b/docs/source/ar/trainer.md index 1784d76a4ecb..be9e44cfa620 100644 --- a/docs/source/ar/trainer.md +++ b/docs/source/ar/trainer.md @@ -611,7 +611,6 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` يمكنك أيضًا تحديد المعلمات من ملف `config_file.yaml` مباشرة في سطر الأوامر: @@ -634,7 +633,6 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` اطلع على برنامج تعليمي [Launching your Accelerate scripts](https://huggingface.co/docs/accelerate/basic_tutorials/launch) لمعرفة المزيد حول `accelerate_launch` والتكوينات المخصصة. 
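[Illustrative sketch, not part of the patch] With the `--output_dir previous_output_dir` method removed from these docs, only `--resume_from_checkpoint path_to_specific_checkpoint` remains. If the old "resume from the newest checkpoint in `output_dir`" behaviour is still wanted, the latest checkpoint can be located explicitly with `get_last_checkpoint` (the helper the example scripts previously imported); the output directory below is a placeholder:

```python
# Illustrative only: find the newest checkpoint-* folder and pass it to
# --resume_from_checkpoint (or to trainer.train) yourself.
import os

from transformers.trainer_utils import get_last_checkpoint

output_dir = "/tmp/tst-summarization"  # placeholder: the --output_dir used for training

last_checkpoint = None
if os.path.isdir(output_dir):
    # Returns the path of the highest-numbered checkpoint-* subfolder, or None.
    last_checkpoint = get_last_checkpoint(output_dir)

if last_checkpoint is not None:
    print(f"resume with: --resume_from_checkpoint {last_checkpoint}")
```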
diff --git a/docs/source/de/run_scripts.md b/docs/source/de/run_scripts.md index 004f67291979..833d886c7e81 100644 --- a/docs/source/de/run_scripts.md +++ b/docs/source/de/run_scripts.md @@ -98,7 +98,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -122,7 +121,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -144,7 +142,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -201,7 +198,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -229,7 +225,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -243,25 +238,6 @@ examples/pytorch/summarization/run_summarization.py -h Eine weitere hilfreiche Option, die Sie aktivieren können, ist die Wiederaufnahme des Trainings von einem früheren Kontrollpunkt aus. Auf diese Weise können Sie im Falle einer Unterbrechung Ihres Trainings dort weitermachen, wo Sie aufgehört haben, ohne von vorne beginnen zu müssen. Es gibt zwei Methoden, um das Training von einem Kontrollpunkt aus wieder aufzunehmen. -Die erste Methode verwendet das Argument `output_dir previous_output_dir`, um das Training ab dem letzten in `output_dir` gespeicherten Kontrollpunkt wieder aufzunehmen. In diesem Fall sollten Sie `overwrite_output_dir` entfernen: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -Die zweite Methode verwendet das Argument `Resume_from_checkpoint path_to_specific_checkpoint`, um das Training ab einem bestimmten Checkpoint-Ordner wieder aufzunehmen. - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -273,7 +249,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -305,6 +280,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/en/deepspeed.md b/docs/source/en/deepspeed.md index 55b8408a43bb..7971854011ee 100644 --- a/docs/source/en/deepspeed.md +++ b/docs/source/en/deepspeed.md @@ -593,7 +593,7 @@ To deploy DeepSpeed on multiple GPUs, add `--num_gpus`. 
You don't need to add `- deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -616,7 +616,7 @@ To deploy DeepSpeed on a single GPU, add `--num_gpus`. You don't need to add `-- deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro diff --git a/docs/source/en/run_scripts.md b/docs/source/en/run_scripts.md index 594eb84b02a1..74473b5228e3 100644 --- a/docs/source/en/run_scripts.md +++ b/docs/source/en/run_scripts.md @@ -61,9 +61,8 @@ The example below fine-tunes [T5-small](https://huggingface.co/google-t5/t5-smal The example script downloads and preprocesses a dataset, and then fine-tunes it with [`Trainer`] with a supported model architecture. -Resuming training from a checkpoint is very useful if training is interrupted because you don't have to start over again. There are two ways to resume training from a checkpoint. +Resuming training from a checkpoint is very useful if training is interrupted because you don't have to start over again: -* `--output dir previous_output_dir` resumes training from the latest checkpoint stored in `output_dir`. Remove the `--overwrite_output_dir` parameter if you're using this method. * `--resume_from_checkpoint path_to_specific_checkpoint` resumes training from a specific checkpoint folder. Share your model on the [Hub](https://huggingface.co/) with the `--push_to_hub` parameter. It creates a repository and uploads the model to the folder name specified in `--output_dir`. You could also use the `--push_to_hub_model_id` parameter to specify the repository name. 
@@ -85,9 +84,6 @@ python examples/pytorch/summarization/run_summarization.py \ --per_device_eval_batch_size=4 \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ - # remove if using `output_dir previous_output_dir` - # --overwrite_output_dir \ - --output_dir previous_output_dir \ # --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate \ ``` @@ -168,7 +164,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate \ diff --git a/docs/source/en/trainer.md b/docs/source/en/trainer.md index 32f14bc41da3..98b23a3e7b94 100644 --- a/docs/source/en/trainer.md +++ b/docs/source/en/trainer.md @@ -361,8 +361,7 @@ accelerate launch \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir + --output_dir /tmp/$TASK_NAME/ ``` > [!TIP] diff --git a/docs/source/es/run_scripts.md b/docs/source/es/run_scripts.md index 462eb5bc3034..6db78af2ce5e 100644 --- a/docs/source/es/run_scripts.md +++ b/docs/source/es/run_scripts.md @@ -98,7 +98,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -122,7 +121,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -144,7 +142,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -201,7 +198,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -229,7 +225,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -243,25 +238,6 @@ examples/pytorch/summarization/run_summarization.py -h Otra opción útil para habilitar es reanudar el entrenamiento desde un punto de control anterior. Esto asegurará que puedas continuar donde lo dejaste sin comenzar de nuevo si tu entrenamiento se interrumpe. Hay dos métodos para reanudar el entrenamiento desde un punto de control. -El primer método utiliza el argumento `output_dir previous_output_dir` para reanudar el entrenamiento desde el último punto de control almacenado en `output_dir`. 
En este caso, debes eliminar `overwrite_output_dir`: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -El segundo método utiliza el argumento `resume_from_checkpoint path_to_specific_checkpoint` para reanudar el entrenamiento desde una carpeta de punto de control específica. - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -273,7 +249,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -305,6 +280,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/es/trainer.md b/docs/source/es/trainer.md index 4455521f5317..335ec54b39bd 100644 --- a/docs/source/es/trainer.md +++ b/docs/source/es/trainer.md @@ -381,7 +381,6 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` También puedes especificar los parámetros del archivo config_file.yaml directamente en la línea de comandos: @@ -404,7 +403,6 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` Consulta el tutorial [Lanzamiento de tus scripts con Accelerate](https://huggingface.co/docs/accelerate/basic_tutorials/launch) para obtener más información sobre `accelerate_launch` y las configuraciones personalizadas. 
diff --git a/docs/source/fr/run_scripts_fr.md b/docs/source/fr/run_scripts_fr.md index 1acf683253da..43e3ed024150 100644 --- a/docs/source/fr/run_scripts_fr.md +++ b/docs/source/fr/run_scripts_fr.md @@ -100,7 +100,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -124,7 +123,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -147,7 +145,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -204,7 +201,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -231,7 +227,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -243,26 +238,7 @@ examples/pytorch/summarization/run_summarization.py -h ## Reprendre l'entraînement à partir d'un point de contrôle -Une autre option utile est de reprendre l'entraînement à partir d'un point de contrôle précédent. Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu. Il existe deux méthodes pour reprendre l'entraînement à partir d'un point de contrôle. - -La première méthode utilise l'argument `output_dir previous_output_dir` pour reprendre l'entraînement à partir du dernier point de contrôle stocké dans `output_dir`. Dans ce cas, vous devez supprimer l'argument `overwrite_output_dir`. - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -La seconde méthode utilise l'argument `resume_from_checkpoint path_to_specific_checkpoint` pour reprendre l'entraînement à partir d'un dossier de point de contrôle spécifique. +Une autre option utile est de reprendre l'entraînement à partir d'un point de contrôle précédent. 
Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu: ```bash python examples/pytorch/summarization/run_summarization.py @@ -275,7 +251,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -308,6 +283,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/it/run_scripts.md b/docs/source/it/run_scripts.md index ad7df423cb96..e0986b839771 100644 --- a/docs/source/it/run_scripts.md +++ b/docs/source/it/run_scripts.md @@ -98,7 +98,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -122,7 +121,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -144,7 +142,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -201,7 +198,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -229,7 +225,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -243,25 +238,6 @@ examples/pytorch/summarization/run_summarization.py -h Un'altra utile opzione è riavviare un addestramento da un checkpoint precedente. Questo garantirà che tu possa riprendere da dove hai interrotto senza ricominciare se l'addestramento viene interrotto. Ci sono due metodi per riavviare l'addestramento da un checkpoint: -Il primo metodo usa l'argomento `output_dir previous_output_dir` per riavviare l'addestramento dall'ultima versione del checkpoint contenuto in `output_dir`. In questo caso, dovresti rimuovere `overwrite_output_dir`: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -Il secondo metodo usa l'argomento `resume_from_checkpoint path_to_specific_checkpoint` per riavviare un addestramento da una specifica cartella di checkpoint. 
- ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -273,7 +249,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -305,6 +280,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ja/main_classes/deepspeed.md b/docs/source/ja/main_classes/deepspeed.md index affb6c0a724c..a8801f0379ea 100644 --- a/docs/source/ja/main_classes/deepspeed.md +++ b/docs/source/ja/main_classes/deepspeed.md @@ -188,7 +188,7 @@ deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.js deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -211,7 +211,7 @@ DeepSpeed 関連の引数が 2 つありますが、簡単にするためであ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -1789,7 +1789,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ --model_name_or_path google-t5/t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ ---overwrite_output_dir --per_device_eval_batch_size 4 \ +--per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config "ro-en" --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix "translate English to Romanian: " diff --git a/docs/source/ja/main_classes/trainer.md b/docs/source/ja/main_classes/trainer.md index e6e6e28d308b..e5d55ff77b4c 100644 --- a/docs/source/ja/main_classes/trainer.md +++ b/docs/source/ja/main_classes/trainer.md @@ -534,7 +534,6 @@ python examples/pytorch/text-classification/run_glue.py \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` **注意すべきいくつかの注意事項** @@ -669,7 +668,6 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ ---overwrite_output_dir ``` 4. 
`accelerate launch`するための cmd 引数を直接使用することもできます。上の例は次のようにマッピングされます。 @@ -694,7 +692,6 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ ---overwrite_output_dir ``` 詳細については、🤗 Accelerate CLI ガイドを参照してください: [🤗 Accelerate スクリプトの起動](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 diff --git a/docs/source/ja/run_scripts.md b/docs/source/ja/run_scripts.md index ee738e3e4313..bf0ed8627024 100644 --- a/docs/source/ja/run_scripts.md +++ b/docs/source/ja/run_scripts.md @@ -104,7 +104,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -131,7 +130,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -153,7 +151,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -212,7 +209,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -240,7 +236,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -254,26 +249,6 @@ examples/pytorch/summarization/run_summarization.py -h 以前のチェックポイントからトレーニングを再開するための役立つオプションもあります。これにより、トレーニングが中断された場合でも、最初からやり直すことなく、中断したところから再開できます。チェックポイントからトレーニングを再開するための2つの方法があります。 -最初の方法は、`output_dir previous_output_dir` 引数を使用して、`output_dir` に保存された最新のチェックポイントからトレーニングを再開する方法です。この場合、`overwrite_output_dir` を削除する必要があります: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -2番目の方法では、`resume_from_checkpoint path_to_specific_checkpoint` 引数を使用して、特定のチェックポイントフォルダからトレーニングを再開します。 - - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -285,7 +260,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -319,7 +293,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ko/deepspeed.md b/docs/source/ko/deepspeed.md index d0955ee3db80..823865168be4 100644 --- a/docs/source/ko/deepspeed.md +++ b/docs/source/ko/deepspeed.md @@ -590,7 +590,7 @@ bf16은 설정 파일에서 설정하거나 다음 인수를 전달하면 명령 deepspeed --num_gpus=2 
examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -605,7 +605,7 @@ deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro diff --git a/docs/source/ko/perf_train_special.md b/docs/source/ko/perf_train_special.md index 188db542f7c0..7927f3748f07 100644 --- a/docs/source/ko/perf_train_special.md +++ b/docs/source/ko/perf_train_special.md @@ -54,8 +54,7 @@ python examples/pytorch/text-classification/run_glue.py \ --per_device_train_batch_size 32 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir + --output_dir /tmp/$TASK_NAME/ ``` `gloco`와 `nccl`과 같은 [분산 학습 백엔드](https://pytorch.org/docs/stable/distributed.html#backends)는 `mps` 장치에서 지원되지 않으므로, MPS 백엔드에서는 단일 GPU로만 학습이 가능합니다. diff --git a/docs/source/ko/run_scripts.md b/docs/source/ko/run_scripts.md index 874834a1f32a..2db8fe16c600 100644 --- a/docs/source/ko/run_scripts.md +++ b/docs/source/ko/run_scripts.md @@ -106,7 +106,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -131,7 +130,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -156,7 +154,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -216,7 +213,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -245,7 +241,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -262,8 +257,6 @@ examples/pytorch/summarization/run_summarization.py -h 이렇게 하면 훈련이 중단되더라도 처음부터 다시 시작하지 않고 중단한 부분부터 다시 시작할 수 있습니다. 체크포인트에서 훈련을 재개하는 방법에는 두 가지가 있습니다. -첫 번째는 `output_dir previous_output_dir` 인수를 사용하여 `output_dir`에 저장된 최신 체크포인트부터 훈련을 재개하는 방법입니다. 
-이 경우 `overwrite_output_dir`을 제거해야 합니다: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -275,24 +268,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -두 번째는 `resume_from_checkpoint path_to_specific_checkpoint` 인수를 사용하여 특정 체크포인트 폴더에서 훈련을 재개하는 방법입니다. - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -325,6 +300,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ko/trainer.md b/docs/source/ko/trainer.md index d753627c86fb..65c3fbef982f 100644 --- a/docs/source/ko/trainer.md +++ b/docs/source/ko/trainer.md @@ -505,7 +505,6 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` `config_file.yaml` 파일의 매개변수를 직접 지정할 수도 있습니다: @@ -528,7 +527,6 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` `accelerate_launch`와 사용자 정의 구성에 대해 더 알아보려면 [Accelerate 스크립트 실행](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 튜토리얼을 확인하세요. \ No newline at end of file diff --git a/docs/source/pt/run_scripts.md b/docs/source/pt/run_scripts.md index 4b4baf18988f..72060f98571d 100644 --- a/docs/source/pt/run_scripts.md +++ b/docs/source/pt/run_scripts.md @@ -99,7 +99,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -123,7 +122,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -145,7 +143,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -203,7 +200,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -231,7 +227,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -245,25 +240,6 @@ examples/pytorch/summarization/run_summarization.py -h Outra opção útil para habilitar é retomar o treinamento de um checkpoint anterior. Isso garantirá que você possa continuar de onde parou sem recomeçar se o seu treinamento for interrompido. 
Existem dois métodos para retomar o treinamento a partir de um checkpoint. -O primeiro método usa o argumento `output_dir previous_output_dir` para retomar o treinamento do último checkpoint armazenado em `output_dir`. Neste caso, você deve remover `overwrite_output_dir`: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -O segundo método usa o argumento `resume_from_checkpoint path_to_specific_checkpoint` para retomar o treinamento de uma pasta de checkpoint específica. - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -275,7 +251,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -307,6 +282,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/zh/main_classes/deepspeed.md b/docs/source/zh/main_classes/deepspeed.md index 8319f5cad4a3..2fe0d554a7f6 100644 --- a/docs/source/zh/main_classes/deepspeed.md +++ b/docs/source/zh/main_classes/deepspeed.md @@ -179,7 +179,7 @@ deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.js deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -202,7 +202,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --overwrite_output_dir --fp16 \ +--output_dir output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -1659,7 +1659,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ --model_name_or_path google-t5/t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ ---overwrite_output_dir --per_device_eval_batch_size 4 \ +--per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config "ro-en" --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix "translate English to Romanian: " diff --git a/docs/source/zh/main_classes/trainer.md b/docs/source/zh/main_classes/trainer.md index 159477fe64a0..5d587fd6d575 100644 --- a/docs/source/zh/main_classes/trainer.md +++ b/docs/source/zh/main_classes/trainer.md @@ -471,7 +471,6 @@ python examples/pytorch/text-classification/run_glue.py \ 
--learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` **需要注意的一些注意事项** @@ -606,7 +605,6 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ ---overwrite_output_dir ``` 4. 你也可以直接使用`accelerate launch`的cmd参数。上面的示例将映射到: @@ -631,7 +629,6 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ ---overwrite_output_dir ``` 有关更多信息,请参阅 🤗 Accelerate CLI 指南:[启动您的 🤗 Accelerate 脚本](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 diff --git a/docs/source/zh/perf_train_special.md b/docs/source/zh/perf_train_special.md index ee8553475679..8e7929ecb1cd 100644 --- a/docs/source/zh/perf_train_special.md +++ b/docs/source/zh/perf_train_special.md @@ -50,7 +50,6 @@ python examples/pytorch/text-classification/run_glue.py \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ - --overwrite_output_dir ``` 用于[分布式设置](https://pytorch.org/docs/stable/distributed.html#backends)的后端(如`gloo`和`nccl`)不支持`mps`设备,这也意味着使用 MPS 后端时只能在单个 GPU 上进行训练。 diff --git a/docs/source/zh/run_scripts.md b/docs/source/zh/run_scripts.md index 32bf2342f9aa..60e78fcb9e9b 100644 --- a/docs/source/zh/run_scripts.md +++ b/docs/source/zh/run_scripts.md @@ -99,7 +99,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -124,7 +123,6 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -145,7 +143,6 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -202,7 +199,6 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -231,7 +227,6 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -245,26 +240,6 @@ examples/pytorch/summarization/run_summarization.py -h 另一个有用的选项是从之前的checkpoint恢复训练。这将确保在训练中断时,您可以从之前停止的地方继续进行,而无需重新开始。有两种方法可以从checkpoint恢复训练。 -第一种方法使用`output_dir previous_output_dir`参数从存储在`output_dir`中的最新的checkpoint恢复训练。在这种情况下,您应该删除`overwrite_output_dir`: - -```bash -python examples/pytorch/summarization/run_summarization.py - --model_name_or_path google-t5/t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --output_dir previous_output_dir \ - --predict_with_generate -``` - -第二种方法使用`resume_from_checkpoint path_to_specific_checkpoint`参数从特定的checkpoint文件夹恢复训练。 - - ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -276,7 +251,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 
\ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -309,6 +283,5 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/examples/legacy/multiple_choice/run_multiple_choice.py b/examples/legacy/multiple_choice/run_multiple_choice.py index aa1297656a90..92947e2092cf 100644 --- a/examples/legacy/multiple_choice/run_multiple_choice.py +++ b/examples/legacy/multiple_choice/run_multiple_choice.py @@ -95,17 +95,6 @@ def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" - " --overwrite_output_dir to overcome." - ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/question-answering/run_squad.py b/examples/legacy/question-answering/run_squad.py index 39ba14a12afa..126fa197ee27 100644 --- a/examples/legacy/question-answering/run_squad.py +++ b/examples/legacy/question-answering/run_squad.py @@ -641,9 +641,6 @@ def main(): help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) @@ -677,18 +674,6 @@ def main(): "stride or increase the maximum length to ensure the features are correctly built." ) - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script diff --git a/examples/legacy/question-answering/run_squad_trainer.py b/examples/legacy/question-answering/run_squad_trainer.py index d3730d1bc0ba..5288e3019b9a 100644 --- a/examples/legacy/question-answering/run_squad_trainer.py +++ b/examples/legacy/question-answering/run_squad_trainer.py @@ -76,17 +76,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" - " --overwrite_output_dir to overcome." 
- ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/run_language_modeling.py b/examples/legacy/run_language_modeling.py index 8a6b8eded34a..64c92fa205e0 100755 --- a/examples/legacy/run_language_modeling.py +++ b/examples/legacy/run_language_modeling.py @@ -207,16 +207,6 @@ def main(): "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " "or remove the --do_eval argument." ) - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" - " --overwrite_output_dir to overcome." - ) # Setup logging logging.basicConfig( diff --git a/examples/legacy/run_swag.py b/examples/legacy/run_swag.py index 221f9cc9c98d..8c80cf8a347a 100755 --- a/examples/legacy/run_swag.py +++ b/examples/legacy/run_swag.py @@ -557,9 +557,6 @@ def main(): help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) @@ -584,18 +581,6 @@ def main(): parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script diff --git a/examples/legacy/seq2seq/finetune_trainer.py b/examples/legacy/seq2seq/finetune_trainer.py index 44f5a75eda0a..54ca2c898c82 100755 --- a/examples/legacy/seq2seq/finetune_trainer.py +++ b/examples/legacy/seq2seq/finetune_trainer.py @@ -39,7 +39,6 @@ Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, - check_output_dir, freeze_embeds, freeze_params, lmap, @@ -168,8 +167,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - check_output_dir(training_args) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/seq2seq/train_distil_marian_enro.sh b/examples/legacy/seq2seq/train_distil_marian_enro.sh index 5e86a6991c57..d0e1075bb079 100644 --- a/examples/legacy/seq2seq/train_distil_marian_enro.sh +++ b/examples/legacy/seq2seq/train_distil_marian_enro.sh @@ -20,7 +20,7 @@ export MAX_LEN=128 python finetune_trainer.py \ --tokenizer_name $m --model_name_or_path $m \ --data_dir $ENRO_DIR \ - --output_dir marian_en_ro_6_3 --overwrite_output_dir \ + --output_dir marian_en_ro_6_3 \ --learning_rate=3e-4 \ --warmup_steps 500 --sortish_sampler \ --fp16 \ diff --git a/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh b/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh index 00ef67226196..fcf4ea13698f 100644 --- a/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh +++ b/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh @@ -22,7 +22,7 @@ python xla_spawn.py --num_cores $TPU_NUM_CORES \ finetune_trainer.py \ --tokenizer_name $m --model_name_or_path $m \ --data_dir $ENRO_DIR \ - --output_dir marian_en_ro_6_3 --overwrite_output_dir \ + --output_dir marian_en_ro_6_3 \ --learning_rate=3e-4 \ --warmup_steps 500 \ --per_device_train_batch_size=$BS --per_device_eval_batch_size=$BS \ diff --git a/examples/legacy/seq2seq/train_distilbart_cnn.sh b/examples/legacy/seq2seq/train_distilbart_cnn.sh index 42f34e0cb6e7..a490019588ce 100644 --- a/examples/legacy/seq2seq/train_distilbart_cnn.sh +++ b/examples/legacy/seq2seq/train_distilbart_cnn.sh @@ -21,7 +21,7 @@ export MAX_TGT_LEN=142 python finetune_trainer.py \ --model_name_or_path $m --tokenizer_name $tok \ --data_dir cnn_dm \ - --output_dir distilbart-cnn-12-6 --overwrite_output_dir \ + --output_dir distilbart-cnn-12-6 \ --learning_rate=3e-5 \ --warmup_steps 500 --sortish_sampler \ --fp16 \ diff --git a/examples/legacy/seq2seq/train_mbart_cc25_enro.sh b/examples/legacy/seq2seq/train_mbart_cc25_enro.sh index 63c8051b47de..fb31790a2c19 100644 --- a/examples/legacy/seq2seq/train_mbart_cc25_enro.sh +++ b/examples/legacy/seq2seq/train_mbart_cc25_enro.sh @@ -15,7 +15,7 @@ python finetune_trainer.py \ --model_name_or_path=facebook/mbart-large-cc25 \ --data_dir $ENRO_DIR \ - --output_dir mbart_cc25_enro --overwrite_output_dir \ + --output_dir mbart_cc25_enro \ --learning_rate=3e-5 \ --warmup_steps 500 \ --fp16 \ diff --git a/examples/legacy/seq2seq/utils.py b/examples/legacy/seq2seq/utils.py index 221b1405aa26..43d66128360d 100644 --- a/examples/legacy/seq2seq/utils.py +++ b/examples/legacy/seq2seq/utils.py @@ -639,27 +639,3 @@ def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] - - -def 
check_output_dir(args, expected_items=0): - """ - Checks whether to bail out if output_dir already exists and has more than expected_items in it - - `args`: needs to have the following attributes of `args`: - - output_dir - - do_train - - overwrite_output_dir - - `expected_items`: normally 0 (default) - i.e. empty dir, but in some cases a few files are expected (e.g. recovery from OOM) - """ - if ( - os.path.exists(args.output_dir) - and len(os.listdir(args.output_dir)) > expected_items - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({args.output_dir}) already exists and " - f"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). " - "Use --overwrite_output_dir to overcome." - ) diff --git a/examples/legacy/token-classification/run_ner.py b/examples/legacy/token-classification/run_ner.py index 69b8a27ac799..1e6e5e402310 100644 --- a/examples/legacy/token-classification/run_ner.py +++ b/examples/legacy/token-classification/run_ner.py @@ -111,17 +111,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" - " --overwrite_output_dir to overcome." - ) - module = import_module("tasks") try: token_classification_task_clazz = getattr(module, model_args.task_type) diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index 18ed7ae8e38b..f803f71525e3 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -72,8 +72,7 @@ token-classification/run_ner.py -h You can resume training from a previous checkpoint like this: -1. Pass `--output_dir previous_output_dir` without `--overwrite_output_dir` to resume training from the latest checkpoint in `output_dir` (what you would use if the training was interrupted, for instance). -2. Pass `--resume_from_checkpoint path_to_a_specific_checkpoint` to resume training from that checkpoint folder. +1. Pass `--resume_from_checkpoint path_to_a_specific_checkpoint` to resume training from that checkpoint folder. Should you want to turn an example into a notebook where you'd no longer have access to the command line, 🤗 Trainer supports resuming from a checkpoint via `trainer.train(resume_from_checkpoint)`. 
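[Illustrative sketch, not part of the patch] The README hunk above keeps `--resume_from_checkpoint` on the command line and `trainer.train(resume_from_checkpoint)` in notebooks as the way to resume, while the script and `utils.py` hunks drop the old non-empty `output_dir` guard. A minimal sketch of the `Trainer` API call, with the model, dataset, and checkpoint value passed in as placeholders:

```python
# Illustrative only: resuming a run through the Trainer API.
from transformers import Trainer, TrainingArguments


def resume_training(model, train_dataset, checkpoint=None):
    """`checkpoint` may be a checkpoint folder path, True (newest checkpoint in
    output_dir), or None (start from scratch)."""
    args = TrainingArguments(output_dir="output_dir", num_train_epochs=3)
    trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
    # The example scripts in this patch drop their "output_dir already exists"
    # pre-flight check; resuming is opt-in via this argument instead.
    trainer.train(resume_from_checkpoint=checkpoint)
    return trainer
```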
diff --git a/examples/pytorch/audio-classification/README.md b/examples/pytorch/audio-classification/README.md index 6f9069b331ab..2910b3587623 100644 --- a/examples/pytorch/audio-classification/README.md +++ b/examples/pytorch/audio-classification/README.md @@ -34,7 +34,6 @@ python run_audio_classification.py \ --dataset_name superb \ --dataset_config_name ks \ --output_dir wav2vec2-base-ft-keyword-spotting \ - --overwrite_output_dir \ --remove_unused_columns False \ --do_train \ --do_eval \ @@ -76,7 +75,6 @@ python run_audio_classification.py \ --audio_column_name audio \ --label_column_name language \ --output_dir wav2vec2-base-lang-id \ - --overwrite_output_dir \ --remove_unused_columns False \ --do_train \ --do_eval \ diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index bd190e801520..3abd6e158d39 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -47,7 +47,6 @@ TrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -245,21 +244,6 @@ def main(): # Set seed before initializing model. set_seed(training_args.seed) - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to train from scratch." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Initialize our dataset and prepare it for the audio classification task. raw_datasets = DatasetDict() raw_datasets["train"] = load_dataset( @@ -408,8 +392,6 @@ def compute_metrics(eval_pred): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/contrastive-image-text/README.md b/examples/pytorch/contrastive-image-text/README.md index 864a31b60372..9211c0cc59f9 100644 --- a/examples/pytorch/contrastive-image-text/README.md +++ b/examples/pytorch/contrastive-image-text/README.md @@ -97,6 +97,5 @@ python run_clip.py \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \ - --overwrite_output_dir \ --push_to_hub ``` diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 461062f6849b..51b31edb92d0 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -271,21 +271,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # 3. 
Detecting last checkpoint and eventually continue from last checkpoint - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # 4. Load dataset # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ @@ -497,8 +482,6 @@ def filter_corrupt_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 8b498b545c45..6da5876fd0ca 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -225,21 +225,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -418,8 +403,6 @@ def val_transforms(example_batch): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/image-pretraining/README.md b/examples/pytorch/image-pretraining/README.md index bca37f24135a..9e1eae48f834 100644 --- a/examples/pytorch/image-pretraining/README.md +++ b/examples/pytorch/image-pretraining/README.md @@ -44,7 +44,6 @@ Alternatively, one can decide to further pre-train an already pre-trained (or fi !python run_mim.py \ --model_type vit \ --output_dir ./outputs/ \ - --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ @@ -95,7 +94,6 @@ Next, we can run the script by providing the path to this custom configuration ( --config_name_or_path path_to_config \ --model_type swin \ --output_dir ./outputs/ \ - --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 2d92d8ab434d..14da341177bb 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -217,21 +217,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Initialize our dataset. ds = load_dataset( data_args.dataset_name, @@ -377,8 +362,6 @@ def preprocess_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 5a636bbad58b..6a17d4242e17 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -281,21 +281,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. 
" - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Initialize our dataset. ds = load_dataset( data_args.dataset_name, @@ -456,8 +441,6 @@ def preprocess_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/instance-segmentation/run_instance_segmentation.py b/examples/pytorch/instance-segmentation/run_instance_segmentation.py index ddfd05e0f661..5d9e992c4b9e 100644 --- a/examples/pytorch/instance-segmentation/run_instance_segmentation.py +++ b/examples/pytorch/instance-segmentation/run_instance_segmentation.py @@ -334,18 +334,6 @@ def find_last_checkpoint(training_args: TrainingArguments) -> Optional[str]: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: - checkpoint = get_last_checkpoint(training_args.output_dir) - if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) return checkpoint diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 8c677b404630..eda38a2e0d39 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -317,21 +317,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -665,8 +650,6 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/language-modeling/run_fim.py b/examples/pytorch/language-modeling/run_fim.py index 134d741f6b6c..4e309e555e0d 100644 --- a/examples/pytorch/language-modeling/run_fim.py +++ b/examples/pytorch/language-modeling/run_fim.py @@ -344,21 +344,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -806,8 +791,6 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 9c0bf50ede28..9e9a2bd680a7 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -290,21 +290,6 @@ def main(): # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -631,8 +616,6 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 86bc31beedf8..b0df1dfb9bc7 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -269,21 +269,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -528,8 +513,6 @@ def group_texts(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index a8679f2b739c..7933b7f968e9 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -213,21 +213,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -401,8 +386,6 @@ def compute_metrics(eval_predictions): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/object-detection/run_object_detection.py b/examples/pytorch/object-detection/run_object_detection.py index ee0bd66cae99..64d988e2bd98 100644 --- a/examples/pytorch/object-detection/run_object_detection.py +++ b/examples/pytorch/object-detection/run_object_detection.py @@ -373,23 +373,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: - checkpoint = get_last_checkpoint(training_args.output_dir) - if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # ------------------------------------------------------------------------------------------------ # Load dataset, prepare splits # ------------------------------------------------------------------------------------------------ diff --git a/examples/pytorch/old_test_xla_examples.py b/examples/pytorch/old_test_xla_examples.py index b3101aa06b98..d0bcb1c8478f 100644 --- a/examples/pytorch/old_test_xla_examples.py +++ b/examples/pytorch/old_test_xla_examples.py @@ -55,7 +55,6 @@ def test_run_glue(self): ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} - --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index be93a526b803..21fe714c7233 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -262,21 +262,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
- ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -646,8 +631,6 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 4bcf4f9af8c8..8b31b20fee2f 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -260,21 +260,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -672,8 +657,6 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index ac3c8ef4ec62..738ca3b610e4 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -307,21 +307,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -669,8 +654,6 @@ def post_processing_function( checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index cc45239f75c0..b9642e0454d6 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -221,21 +221,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. @@ -419,8 +404,6 @@ def preprocess_batch(example_batch, transforms: A.Compose): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/speech-recognition/README.md b/examples/pytorch/speech-recognition/README.md index 41df41880b5a..245954920645 100644 --- a/examples/pytorch/speech-recognition/README.md +++ b/examples/pytorch/speech-recognition/README.md @@ -70,7 +70,6 @@ python run_speech_recognition_ctc.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir="./wav2vec2-common_voice-tr-demo" \ - --overwrite_output_dir \ --num_train_epochs="15" \ --per_device_train_batch_size="16" \ --gradient_accumulation_steps="2" \ @@ -106,7 +105,6 @@ torchrun \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir="./wav2vec2-common_voice-tr-demo-dist" \ - --overwrite_output_dir \ --num_train_epochs="15" \ --per_device_train_batch_size="4" \ --learning_rate="3e-4" \ @@ -156,7 +154,6 @@ However, the `--shuffle_buffer_size` argument controls how many examples we can --train_split_name="train+validation" \ --eval_split_name="test" \ --output_dir="wav2vec2-xls-r-common_voice-tr-ft" \ - --overwrite_output_dir \ --max_steps="5000" \ --per_device_train_batch_size="8" \ --gradient_accumulation_steps="2" \ @@ -390,7 +387,6 @@ python run_speech_recognition_seq2seq.py \ --freeze_feature_encoder="False" \ --gradient_checkpointing \ --fp16 \ - --overwrite_output_dir \ --do_train \ --do_eval \ --predict_with_generate \ @@ -431,7 
+427,6 @@ torchrun \ --freeze_feature_encoder="False" \ --gradient_checkpointing \ --fp16 \ - --overwrite_output_dir \ --do_train \ --do_eval \ --predict_with_generate \ @@ -539,7 +534,6 @@ python run_speech_recognition_seq2seq.py \ --output_dir="./" \ --preprocessing_num_workers="16" \ --length_column_name="input_length" \ - --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="8" \ @@ -581,7 +575,6 @@ torchrun \ --output_dir="./" \ --preprocessing_num_workers="16" \ --length_column_name="input_length" \ - --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="8" \ diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index c756a6666187..b32a63061abe 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -429,21 +429,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -558,7 +543,7 @@ def remove_special_characters(batch): vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): - if training_args.overwrite_output_dir and os.path.isfile(vocab_file): + if os.path.isfile(vocab_file): try: os.remove(vocab_file) except OSError: @@ -781,8 +766,6 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - if last_checkpoint is not None: - checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index aaebf59c8660..2e7ac34ddc56 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -406,21 +406,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." 
- ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -557,7 +542,7 @@ def remove_special_characters(batch): vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): - if training_args.overwrite_output_dir and os.path.isfile(vocab_file): + if os.path.isfile(vocab_file): try: os.remove(vocab_file) except OSError: @@ -773,8 +758,6 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - if last_checkpoint is not None: - checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 4b6cda49925b..77df3872e771 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -340,21 +340,6 @@ def main(): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) - # 3. Detecting last checkpoint and eventually continue from last checkpoint - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -603,8 +588,6 @@ def compute_metrics(pred): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the feature extractor too for easy upload diff --git a/examples/pytorch/summarization/README.md b/examples/pytorch/summarization/README.md index 26561df24249..e47f09120514 100644 --- a/examples/pytorch/summarization/README.md +++ b/examples/pytorch/summarization/README.md @@ -50,7 +50,6 @@ python run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -72,7 +71,6 @@ python run_summarization.py \ --validation_file path_to_csv_or_jsonlines_file \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index dd7dd083b49a..2ab7ff3d0759 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -374,21 +374,6 @@ def main(): "`--source_prefix 'summarize: ' `" ) - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -698,8 +683,6 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/test_pytorch_examples.py b/examples/pytorch/test_pytorch_examples.py index d27cc305d6ac..8bfadac779d3 100644 --- a/examples/pytorch/test_pytorch_examples.py +++ b/examples/pytorch/test_pytorch_examples.py @@ -104,7 +104,6 @@ def test_run_glue(self): run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} - --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train @@ -140,7 +139,6 @@ def test_run_clm(self): --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} - --overwrite_output_dir """.split() if backend_device_count(torch_device) > 1: @@ -188,7 +186,6 @@ def test_run_mlm(self): --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} - --overwrite_output_dir --do_train --do_eval --prediction_loss_only @@ -214,7 +211,6 @@ def test_run_ner(self): --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --do_train --do_eval --warmup_steps=2 @@ -243,7 +239,6 @@ def test_run_squad(self): --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train @@ -271,7 +266,6 @@ def test_run_squad_seq2seq(self): --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train @@ -296,7 +290,6 @@ def test_run_swag(self): --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --max_steps=20 --warmup_steps=2 --do_train @@ -334,7 +327,6 @@ def test_run_summarization(self): --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train @@ -364,7 +356,6 @@ def test_run_translation(self): --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} - --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train @@ -396,7 +387,6 @@ def test_run_image_classification(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False - --overwrite_output_dir True --dataloader_num_workers 16 --metric_for_best_model accuracy --max_steps 10 @@ -429,7 +419,6 @@ def test_run_speech_recognition_ctc(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False - --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --seed 42 @@ -459,7 +448,6 @@ def test_run_speech_recognition_ctc_adapter(self): --per_device_train_batch_size 2 
--per_device_eval_batch_size 1 --remove_unused_columns False - --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --target_language tur @@ -491,7 +479,6 @@ def test_run_speech_recognition_seq2seq(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 4 --remove_unused_columns False - --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --seed 42 @@ -523,7 +510,6 @@ def test_run_audio_classification(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False - --overwrite_output_dir True --num_train_epochs 10 --max_steps 50 --seed 42 @@ -572,7 +558,6 @@ def test_run_vit_mae_pretraining(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False - --overwrite_output_dir True --dataloader_num_workers 16 --metric_for_best_model accuracy --max_steps 10 @@ -597,7 +582,6 @@ def test_run_semantic_segmentation(self): --do_train --do_eval --remove_unused_columns False - --overwrite_output_dir True --max_steps 10 --learning_rate=2e-4 --per_device_train_batch_size=2 @@ -624,7 +608,6 @@ def test_run_object_detection(self): --do_train --do_eval --remove_unused_columns False - --overwrite_output_dir True --eval_do_concat_batches False --max_steps 10 --learning_rate=1e-6 diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index 35413cd7875b..bf62b52e355c 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -321,21 +321,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -693,8 +678,6 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index afa09d746041..101dff28e986 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -266,21 +266,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. 
- last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -566,8 +551,6 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 3027da5feae6..8a57d101e997 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -224,21 +224,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -412,8 +397,6 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 7620d697c126..19b8b59ac36a 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -263,21 +263,6 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." 
- ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. set_seed(training_args.seed) @@ -587,8 +572,6 @@ def compute_metrics(p): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/translation/README.md b/examples/pytorch/translation/README.md index 4659843c66a1..2aab14e2e056 100644 --- a/examples/pytorch/translation/README.md +++ b/examples/pytorch/translation/README.md @@ -53,7 +53,6 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -74,7 +73,6 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -96,7 +94,6 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -118,7 +115,6 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` @@ -144,7 +140,6 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ --predict_with_generate ``` diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index 8e005e0d7323..da3cb9bdc1ec 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -322,21 +322,6 @@ def main(): "`--source_prefix 'translate English to German: ' `" ) - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Set seed before initializing model. 
set_seed(training_args.seed) @@ -617,8 +602,6 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py index c1d81972f8a9..c1e1e0d3ef07 100755 --- a/src/transformers/integrations/integration_utils.py +++ b/src/transformers/integrations/integration_utils.py @@ -1796,7 +1796,7 @@ def _log_model_checkpoint(self, source_directory: str, checkpoint: str): def on_init_end(self, args, state, control, **kwargs): self._volatile_checkpoints_dir = None - if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): + if self._log_checkpoints and args.save_total_limit is not None: self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name if self._log_checkpoints == "best" and not args.load_best_model_at_end: diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 68200f1af9c2..ca97d6a189bc 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -220,9 +220,6 @@ class TrainingArguments: Parameters: output_dir (`str`, *optional*, defaults to `"trainer_output"`): The output directory where the model predictions and checkpoints will be written. - overwrite_output_dir (`bool`, *optional*, defaults to `False`): - If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` - points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example @@ -811,8 +808,7 @@ class TrainingArguments: default=False, metadata={ "help": ( - "Overwrite the content of the output directory. " - "Use this to continue training if output_dir points to a checkpoint directory." + "This argument is deprecated and will be removed in v5." ) }, ) diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py index 8ada67913b03..87ddf02ec341 100755 --- a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -219,21 +219,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -452,16 +437,12 @@ def tokenize_function(examples): # Training if training_args.do_train: {%- if cookiecutter.can_train_from_scratch == "False" %} - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): + if os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None {%- elif cookiecutter.can_train_from_scratch == "True" %} - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): + if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py index 99b1450a0d59..bf3aba7e1a4d 100644 --- a/tests/deepspeed/test_deepspeed.py +++ b/tests/deepspeed/test_deepspeed.py @@ -1303,7 +1303,6 @@ def run_trainer( --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --output_dir {output_dir} - --overwrite_output_dir --max_source_length {max_len} --max_target_length {max_len} --val_max_target_length {max_len} @@ -1373,7 +1372,6 @@ def test_clm(self, stage, dtype): --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} - --overwrite_output_dir --do_train --do_eval --max_train_samples 16 @@ -1410,7 +1408,6 @@ def test_clm_from_config_zero3_fp16(self): --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} - --overwrite_output_dir --do_train --max_train_samples 4 --per_device_train_batch_size 2 diff --git a/tests/deepspeed/test_model_zoo.py b/tests/deepspeed/test_model_zoo.py index 2195bee01ccf..a7201ef7f9a0 100644 --- a/tests/deepspeed/test_model_zoo.py +++ b/tests/deepspeed/test_model_zoo.py @@ -161,7 +161,6 @@ def make_task_cmds(): --num_train_epochs 1 --fp16 --report_to none - --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) diff --git a/tests/extended/test_trainer_ext.py b/tests/extended/test_trainer_ext.py index 1789f9f6c98b..80e71691bbbd 100644 --- a/tests/extended/test_trainer_ext.py +++ b/tests/extended/test_trainer_ext.py @@ -267,7 +267,6 @@ def run_trainer( --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} - --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} diff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py index 6a4060b0a731..8764e21e6713 100644 --- a/tests/fsdp/test_fsdp.py +++ b/tests/fsdp/test_fsdp.py @@ -439,7 +439,6 @@ def get_base_args(self, output_dir, num_epochs, logging_steps): --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir {output_dir} - --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 diff --git a/tests/sagemaker/conftest.py b/tests/sagemaker/conftest.py index 5daf3c4147f9..879cfff1c5ea 100644 --- a/tests/sagemaker/conftest.py +++ b/tests/sagemaker/conftest.py @@ -21,7 +21,6 @@ class SageMakerTestEnvironment: "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", - 
"overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index 525b63f1bc88..f5a259792518 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -198,21 +198,6 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -459,9 +444,7 @@ def compute_metrics(p: EvalPrediction): # Training if training_args.do_train: checkpoint = None - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): + if os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 266a874b64b5..cd4ce82e642f 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -4459,7 +4459,6 @@ def test_end_to_end_example(self): "1", "--output_dir", tmpdir, - "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", From cf4f9e7c4f7837a88eea6eeabf8b4dfe9455f6dc Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Fri, 3 Oct 2025 15:50:28 +0000 Subject: [PATCH 2/8] style --- examples/pytorch/contrastive-image-text/run_clip.py | 1 - .../image-classification/run_image_classification.py | 1 - examples/pytorch/image-pretraining/run_mae.py | 1 - examples/pytorch/image-pretraining/run_mim.py | 1 - .../instance-segmentation/run_instance_segmentation.py | 1 - examples/pytorch/language-modeling/run_clm.py | 1 - examples/pytorch/language-modeling/run_fim.py | 1 - examples/pytorch/language-modeling/run_mlm.py | 1 - examples/pytorch/language-modeling/run_plm.py | 1 - examples/pytorch/multiple-choice/run_swag.py | 1 - examples/pytorch/object-detection/run_object_detection.py | 3 +-- examples/pytorch/question-answering/run_qa.py | 1 - examples/pytorch/question-answering/run_qa_beam_search.py | 1 - examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../semantic-segmentation/run_semantic_segmentation.py | 1 - .../speech-recognition/run_speech_recognition_ctc.py | 4 ++-- .../run_speech_recognition_ctc_adapter.py | 4 ++-- .../speech-recognition/run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 1 - examples/pytorch/text-classification/run_classification.py | 1 - examples/pytorch/text-classification/run_glue.py | 1 - examples/pytorch/text-classification/run_xnli.py | 1 - 
examples/pytorch/token-classification/run_ner.py | 1 - examples/pytorch/translation/run_translation.py | 1 - src/transformers/training_args.py | 6 +----- .../sagemaker/scripts/pytorch/run_glue_model_parallelism.py | 1 - 26 files changed, 8 insertions(+), 33 deletions(-) diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 51b31edb92d0..5f1256813d8c 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -55,7 +55,6 @@ TrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 6da5876fd0ca..04107ffdfb3e 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -58,7 +58,6 @@ TrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 14da341177bb..ea350a855feb 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -41,7 +41,6 @@ ViTMAEConfig, ViTMAEForPreTraining, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 6a17d4242e17..c0b86d009ba8 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -44,7 +44,6 @@ Trainer, TrainingArguments, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/instance-segmentation/run_instance_segmentation.py b/examples/pytorch/instance-segmentation/run_instance_segmentation.py index 5d9e992c4b9e..8e6342a31607 100644 --- a/examples/pytorch/instance-segmentation/run_instance_segmentation.py +++ b/examples/pytorch/instance-segmentation/run_instance_segmentation.py @@ -49,7 +49,6 @@ ) from transformers.image_processing_utils import BatchFeature from transformers.trainer import EvalPrediction -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index eda38a2e0d39..986f873870dc 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -63,7 +63,6 @@ set_seed, ) from transformers.testing_utils import CaptureLogger -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_fim.py b/examples/pytorch/language-modeling/run_fim.py index 4e309e555e0d..14457275811d 100644 --- 
a/examples/pytorch/language-modeling/run_fim.py +++ b/examples/pytorch/language-modeling/run_fim.py @@ -66,7 +66,6 @@ ) from transformers.integrations import is_deepspeed_zero3_enabled from transformers.testing_utils import CaptureLogger -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 9e9a2bd680a7..80ad49bef983 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -62,7 +62,6 @@ is_torch_xla_available, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index b0df1dfb9bc7..f81fd37d5e40 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -55,7 +55,6 @@ XLNetLMHeadModel, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 7933b7f968e9..d1f785d51ca1 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -52,7 +52,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version diff --git a/examples/pytorch/object-detection/run_object_detection.py b/examples/pytorch/object-detection/run_object_detection.py index 64d988e2bd98..e2018bf97226 100644 --- a/examples/pytorch/object-detection/run_object_detection.py +++ b/examples/pytorch/object-detection/run_object_detection.py @@ -51,7 +51,6 @@ from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.trainer import EvalPrediction -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -493,7 +492,7 @@ def main(): # Training if training_args.do_train: - train_result = trainer.train(resume_from_checkpoint=checkpoint) + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 21fe714c7233..ab5bf3267b74 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -43,7 +43,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 8b31b20fee2f..882f4fa9b272 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -41,7 +41,6 @@ 
default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 738ca3b610e4..0c0722510933 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -39,7 +39,7 @@ Seq2SeqTrainingArguments, set_seed, ) -from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint +from transformers.trainer_utils import EvalLoopOutput, EvalPrediction from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index b9642e0454d6..6fd83df7aa8f 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -52,7 +52,6 @@ TrainingArguments, default_data_collator, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index b32a63061abe..f94105cd1d10 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -55,7 +55,7 @@ Wav2Vec2Processor, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint, is_main_process +from transformers.trainer_utils import is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -766,7 +766,7 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - elif os.path.isdir(model_args.model_name_or_path): + if os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index 2e7ac34ddc56..adfe91a31d5f 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -58,7 +58,7 @@ set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import WAV2VEC2_ADAPTER_SAFE_FILE -from transformers.trainer_utils import get_last_checkpoint, is_main_process +from transformers.trainer_utils import is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -758,7 +758,7 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - elif os.path.isdir(model_args.model_name_or_path): + if os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 77df3872e771..7e124d4326f3 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py 
+++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -54,7 +54,7 @@ Seq2SeqTrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint, is_main_process +from transformers.trainer_utils import is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 2ab7ff3d0759..7a3cf76188f8 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -61,7 +61,6 @@ Seq2SeqTrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, is_offline_mode from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index bf62b52e355c..78a9d0094911 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -55,7 +55,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 101dff28e986..cc2e6e1fe59e 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -57,7 +57,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 8a57d101e997..b64ac29552f1 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -56,7 +56,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 19b8b59ac36a..22ab28e651a7 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -54,7 +54,6 @@ TrainingArguments, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index da3cb9bdc1ec..b068cd3ce2de 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -60,7 +60,6 @@ default_data_collator, set_seed, ) -from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index ca97d6a189bc..f13a2ad6e1da 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -806,11 +806,7 @@ class 
TrainingArguments: ) overwrite_output_dir: bool = field( default=False, - metadata={ - "help": ( - "This argument is deprecated and will be removed in v5." - ) - }, + metadata={"help": ("This argument is deprecated and will be removed in v5.")}, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index f5a259792518..c07ad30d7f29 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -38,7 +38,6 @@ set_seed, ) from transformers.trainer import Trainer -from transformers.trainer_utils import get_last_checkpoint from transformers.training_args import TrainingArguments from transformers.utils import check_min_version From 597cc536c2e5c2378cfee1d0af3eff3669db5b3c Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Fri, 3 Oct 2025 16:19:08 +0000 Subject: [PATCH 3/8] deprecate warmup_ratio --- docs/source/en/optimizers.md | 2 +- docs/source/en/tasks/audio_classification.md | 2 +- docs/source/en/tasks/image_classification.md | 2 +- docs/source/en/tasks/video_classification.md | 2 +- docs/source/es/tasks/audio_classification.md | 2 +- docs/source/ja/main_classes/deepspeed.md | 5 +- docs/source/ja/tasks/audio_classification.md | 2 +- docs/source/ja/tasks/image_classification.md | 2 +- docs/source/ja/tasks/video_classification.md | 2 +- docs/source/ko/optimizers.md | 2 +- docs/source/ko/tasks/audio_classification.md | 2 +- docs/source/ko/tasks/image_classification.md | 2 +- docs/source/ko/tasks/video_classification.md | 2 +- docs/source/zh/main_classes/deepspeed.md | 4 +- .../pytorch/audio-classification/README.md | 4 +- examples/pytorch/image-pretraining/README.md | 2 +- src/transformers/modelcard.py | 2 - src/transformers/training_args.py | 54 +++++++++---------- 18 files changed, 44 insertions(+), 51 deletions(-) diff --git a/docs/source/en/optimizers.md b/docs/source/en/optimizers.md index 873b09349feb..e95d6e9c8d66 100644 --- a/docs/source/en/optimizers.md +++ b/docs/source/en/optimizers.md @@ -154,7 +154,7 @@ pip install schedulefree [Schedule Free optimizer (SFO)](https://hf.co/papers/2405.15682) replaces the base optimizers momentum with a combination of averaging and interpolation. Unlike a traditional scheduler, SFO completely removes the need to anneal the learning rate. -SFO supports the RAdam (`schedule_free_radam`), AdamW (`schedule_free_adamw`) and SGD (`schedule_free_sgd`) optimizers. The RAdam scheduler doesn't require `warmup_steps` or `warmup_ratio`. +SFO supports the RAdam (`schedule_free_radam`), AdamW (`schedule_free_adamw`) and SGD (`schedule_free_sgd`) optimizers. The RAdam scheduler doesn't require `warmup_steps`. By default, it is recommended to set `lr_scheduler_type="constant"`. Other `lr_scheduler_type` values may also work, but combining SFO optimizers with other learning rate schedules could affect SFOs intended behavior and performance. diff --git a/docs/source/en/tasks/audio_classification.md b/docs/source/en/tasks/audio_classification.md index 250b980be190..cc8a8238022a 100644 --- a/docs/source/en/tasks/audio_classification.md +++ b/docs/source/en/tasks/audio_classification.md @@ -220,7 +220,7 @@ At this point, only three steps remain: ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... 
load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/en/tasks/image_classification.md b/docs/source/en/tasks/image_classification.md index 4754a91bd482..0af4be8ed6b9 100644 --- a/docs/source/en/tasks/image_classification.md +++ b/docs/source/en/tasks/image_classification.md @@ -211,7 +211,7 @@ At this point, only three steps remain: ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/en/tasks/video_classification.md b/docs/source/en/tasks/video_classification.md index bae638bd84ed..f60a8221caf6 100644 --- a/docs/source/en/tasks/video_classification.md +++ b/docs/source/en/tasks/video_classification.md @@ -378,7 +378,7 @@ Most of the training arguments are self-explanatory, but one that is quite impor ... learning_rate=5e-5, ... per_device_train_batch_size=batch_size, ... per_device_eval_batch_size=batch_size, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/es/tasks/audio_classification.md b/docs/source/es/tasks/audio_classification.md index 3b0446143262..bc63a93c88d2 100644 --- a/docs/source/es/tasks/audio_classification.md +++ b/docs/source/es/tasks/audio_classification.md @@ -220,7 +220,7 @@ Al llegar a este punto, solo quedan tres pasos: ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/ja/main_classes/deepspeed.md b/docs/source/ja/main_classes/deepspeed.md index a8801f0379ea..84d2bfd492a1 100644 --- a/docs/source/ja/main_classes/deepspeed.md +++ b/docs/source/ja/main_classes/deepspeed.md @@ -1292,7 +1292,7 @@ DeepSpeed は、`LRRangeTest`、`OneCycle`、`WarmupLR`、および`WarmupDecayL したがって、スケジューラを設定しない場合、これがデフォルトで設定されるスケジューラになります。 設定ファイルで `scheduler` エントリを設定しない場合、[`Trainer`] は -`--lr_scheduler_type`、`--learning_rate`、および `--warmup_steps` または `--warmup_ratio` の値を設定します。 +`--lr_scheduler_type`、`--learning_rate`、および `--warmup_steps` の値を設定します。 🤗 それのトランスフォーマーバージョン。 以下は、`WarmupLR`の自動構成された`scheduler`エントリの例です。 @@ -1316,8 +1316,7 @@ DeepSpeed は、`LRRangeTest`、`OneCycle`、`WarmupLR`、および`WarmupDecayL - `warmup_min_lr` の値は `0` です。 - `warmup_max_lr` と `--learning_rate` の値。 -- `warmup_num_steps` と `--warmup_steps` の値 (指定されている場合)。それ以外の場合は `--warmup_ratio` を使用します - トレーニング ステップの数を乗算し、切り上げます。 +- `warmup_num_steps` と `--warmup_steps` の値 (指定されている場合) - `total_num_steps` には `--max_steps` の値を指定するか、指定されていない場合は実行時に自動的に導出されます。 環境、データセットのサイズ、およびその他のコマンド ライン引数 ( `WarmupDecayLR`)。 diff --git a/docs/source/ja/tasks/audio_classification.md b/docs/source/ja/tasks/audio_classification.md index d37485cbe226..e1831aa50c38 100644 --- a/docs/source/ja/tasks/audio_classification.md +++ b/docs/source/ja/tasks/audio_classification.md @@ -219,7 +219,7 @@ MInDS-14 データセットのサンプリング レートは 8khz です (こ ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... 
metric_for_best_model="accuracy", diff --git a/docs/source/ja/tasks/image_classification.md b/docs/source/ja/tasks/image_classification.md index 32c30dcff7c8..164176a911d5 100644 --- a/docs/source/ja/tasks/image_classification.md +++ b/docs/source/ja/tasks/image_classification.md @@ -216,7 +216,7 @@ Datasets、🤗 データセット ライブラリから Food-101 データセ ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/ja/tasks/video_classification.md b/docs/source/ja/tasks/video_classification.md index e7e7803c9408..32e871f0ab49 100644 --- a/docs/source/ja/tasks/video_classification.md +++ b/docs/source/ja/tasks/video_classification.md @@ -360,7 +360,7 @@ You should probably TRAIN this model on a down-stream task to be able to use it ... learning_rate=5e-5, ... per_device_train_batch_size=batch_size, ... per_device_eval_batch_size=batch_size, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/ko/optimizers.md b/docs/source/ko/optimizers.md index 7b6fcc7b1016..a5bf877ed6e5 100644 --- a/docs/source/ko/optimizers.md +++ b/docs/source/ko/optimizers.md @@ -154,7 +154,7 @@ pip install schedulefree [Schedule Free optimizer (SFO)](https://hf.co/papers/2405.15682)는 기본 옵티마이저의 모멘텀 대신 평균화(averaging)와 보간(interpolation)을 조합하여 사용합니다. 덕분에 기존의 학습률 스케줄러와 달리, SFO는 학습률을 점진적으로 낮추는 절차가 아예 필요 없습니다. -SFO는 RAdam(`schedule_free_radam`), AdamW(`schedule_free_adamw`), SGD(`schedule_free_sgd`) 옵티마이저를 지원합니다. RAdam 스케줄러는 `warmup_steps`나 `warmup_ratio` 설정이 필요하지 않습니다. +SFO는 RAdam(`schedule_free_radam`), AdamW(`schedule_free_adamw`), SGD(`schedule_free_sgd`) 옵티마이저를 지원합니다. RAdam 스케줄러는 `warmup_steps`. 기본적으로 `lr_scheduler_type="constant"`로 설정하는 것을 권장합니다. 다른 `lr_scheduler_type` 값도 동작할 순 있으나, SFO 옵티마이저와 다른 학습률 스케줄을 함께 사용하면 SFO의 의도된 동작과 성능에 영향을 줄 수 있습니다. diff --git a/docs/source/ko/tasks/audio_classification.md b/docs/source/ko/tasks/audio_classification.md index 789d7ee88373..983692bc100c 100644 --- a/docs/source/ko/tasks/audio_classification.md +++ b/docs/source/ko/tasks/audio_classification.md @@ -221,7 +221,7 @@ MinDS-14 데이터 세트의 샘플링 속도는 8khz이므로(이 정보는 [ ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/ko/tasks/image_classification.md b/docs/source/ko/tasks/image_classification.md index 54490a6f939a..3e1e829ae8d5 100644 --- a/docs/source/ko/tasks/image_classification.md +++ b/docs/source/ko/tasks/image_classification.md @@ -212,7 +212,7 @@ Hugging Face 계정에 로그인하여 모델을 업로드하고 커뮤니티에 ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/ko/tasks/video_classification.md b/docs/source/ko/tasks/video_classification.md index d39d669f8a6f..b220323aa2e3 100644 --- a/docs/source/ko/tasks/video_classification.md +++ b/docs/source/ko/tasks/video_classification.md @@ -357,7 +357,7 @@ You should probably TRAIN this model on a down-stream task to be able to use it ... learning_rate=5e-5, ... per_device_train_batch_size=batch_size, ... 
per_device_eval_batch_size=batch_size, -... warmup_ratio=0.1, +... warmup_steps=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", diff --git a/docs/source/zh/main_classes/deepspeed.md b/docs/source/zh/main_classes/deepspeed.md index 2fe0d554a7f6..8cd047ed5a0a 100644 --- a/docs/source/zh/main_classes/deepspeed.md +++ b/docs/source/zh/main_classes/deepspeed.md @@ -1206,7 +1206,7 @@ DeepSpeed支持`LRRangeTest`、`OneCycle`、`WarmupLR`和`WarmupDecayLR`学习 - 通过 `--lr_scheduler_type constant_with_warmup` 实现 `WarmupLR` - 通过 `--lr_scheduler_type linear` 实现 `WarmupDecayLR`。这也是 `--lr_scheduler_type` 的默认值,因此,如果不配置调度器,这将是默认配置的调度器。 -如果在配置文件中不配置 `scheduler` 条目,[`Trainer`] 将使用 `--lr_scheduler_type`、`--learning_rate` 和 `--warmup_steps` 或 `--warmup_ratio` 的值来配置其🤗 Transformers 版本。 +如果在配置文件中不配置 `scheduler` 条目,[`Trainer`] 将使用 `--lr_scheduler_type`、`--learning_rate` 和 `--warmup_steps` 的值来配置其🤗 Transformers 版本。 以下是 `WarmupLR` 的自动配置示例: @@ -1227,7 +1227,7 @@ DeepSpeed支持`LRRangeTest`、`OneCycle`、`WarmupLR`和`WarmupDecayLR`学习 - `warmup_min_lr` 的值为 `0`。 - `warmup_max_lr` 的值为 `--learning_rate`。 -- `warmup_num_steps` 的值为 `--warmup_steps`(如果提供)。否则,将使用 `--warmup_ratio` 乘以训练步骤的数量,并四舍五入。 +- `warmup_num_steps` 的值为 `--warmup_steps`(如果提供)。 - `total_num_steps` 的值为 `--max_steps` 或者如果没有提供,将在运行时根据环境、数据集的大小和其他命令行参数(对于 `WarmupDecayLR` 来说需要)自动推导。 当然,您可以接管任何或所有的配置值,并自行设置这些值: diff --git a/examples/pytorch/audio-classification/README.md b/examples/pytorch/audio-classification/README.md index 2910b3587623..8872563716c0 100644 --- a/examples/pytorch/audio-classification/README.md +++ b/examples/pytorch/audio-classification/README.md @@ -41,7 +41,7 @@ python run_audio_classification.py \ --learning_rate 3e-5 \ --max_length_seconds 1 \ --attention_mask False \ - --warmup_ratio 0.1 \ + --warmup_steps 0.1 \ --num_train_epochs 5 \ --per_device_train_batch_size 32 \ --gradient_accumulation_steps 4 \ @@ -82,7 +82,7 @@ python run_audio_classification.py \ --learning_rate 3e-4 \ --max_length_seconds 16 \ --attention_mask False \ - --warmup_ratio 0.1 \ + --warmup_steps 0.1 \ --num_train_epochs 10 \ --per_device_train_batch_size 8 \ --gradient_accumulation_steps 4 \ diff --git a/examples/pytorch/image-pretraining/README.md b/examples/pytorch/image-pretraining/README.md index 9e1eae48f834..f5a1289efb43 100644 --- a/examples/pytorch/image-pretraining/README.md +++ b/examples/pytorch/image-pretraining/README.md @@ -165,7 +165,7 @@ python run_mae.py \ --lr_scheduler_type cosine \ --weight_decay 0.05 \ --num_train_epochs 800 \ - --warmup_ratio 0.05 \ + --warmup_steps 0.05 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index cbd148153ca5..2b28dc98178a 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -753,8 +753,6 @@ def extract_hyperparameters_from_trainer(trainer): hyperparameters["optimizer"] = f"Use {optimizer_name} and the args are:\n{optimizer_args}" hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value - if trainer.args.warmup_ratio != 0.0: - hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio if trainer.args.warmup_steps != 0.0: hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps if trainer.args.max_steps != -1: diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index f13a2ad6e1da..48ca4fe57f02 100644 --- a/src/transformers/training_args.py +++ 
b/src/transformers/training_args.py @@ -297,10 +297,9 @@ class TrainingArguments: The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. lr_scheduler_kwargs ('dict', *optional*, defaults to {}): The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. - warmup_ratio (`float`, *optional*, defaults to 0.0): - Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. - warmup_steps (`int`, *optional*, defaults to 0): - Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. + warmup_steps (`int` or `float`, *optional*, defaults to 0): + Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. + If smaller than 1, will be interpreted as ratio of steps used for a linear warmup from 0 to `learning_rate`. log_level (`str`, *optional*, defaults to `passive`): Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and keeps the @@ -880,10 +879,10 @@ class TrainingArguments: ) }, ) - warmup_ratio: float = field( - default=0.0, metadata={"help": "Linear warmup over warmup_ratio fraction of total steps."} - ) - warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) + warmup_ratio: Optional[float] = field( + default=None, metadata={"help": "This argument is deprecated and will be removed in v5. Use `warmup_steps` instead as it also works with float values."}) + + warmup_steps: float = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) log_level: str = field( default="passive", @@ -1715,17 +1714,13 @@ def __post_init__(self): self.report_to = [] elif not isinstance(self.report_to, list): self.report_to = [self.report_to] + + if self.warmup_ratio is not None: + logger.warning("warmup_ratio is deprecated and will be removed in v5. Use `warmup_steps` instead.") + self.warmup_steps = self.warmup_ratio - if self.warmup_ratio < 0 or self.warmup_ratio > 1: - raise ValueError("warmup_ratio must lie in range [0,1]") - elif self.warmup_ratio > 0 and self.warmup_steps > 0: - logger.info( - "Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio" - " during training" - ) - - if not isinstance(self.warmup_steps, int) or self.warmup_steps < 0: - raise ValueError("warmup_steps must be of type int and must be 0 or a positive integer.") + if self.warmup_steps < 0: + raise ValueError("warmup_steps must be an integer or a float") if isinstance(self.fsdp, bool): self.fsdp = [FSDPOption.FULL_SHARD] if self.fsdp else "" @@ -2267,7 +2262,7 @@ def get_warmup_steps(self, num_training_steps: int): Get number of steps used for a linear warmup. """ warmup_steps = ( - self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio) + self.warmup_steps if self.warmup_steps >= 1 else math.ceil(num_training_steps * self.warmup_steps) ) return warmup_steps @@ -2763,8 +2758,8 @@ def set_lr_scheduler( name: Union[str, SchedulerType] = "linear", num_epochs: float = 3.0, max_steps: int = -1, - warmup_ratio: float = 0, - warmup_steps: int = 0, + warmup_steps: float = 0, + warmup_ratio: Optional[float] = None ): """ A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters. 
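The hunks above fold the deprecated `warmup_ratio` into `warmup_steps`, which now also accepts a float in `[0, 1)` interpreted as a ratio of total training steps. A minimal sketch of the resulting behavior, assuming a build of `TrainingArguments` with this patch applied (the output directory and step counts are placeholder values, not taken from the diff):

```python
from transformers import TrainingArguments

# Assumes the patched TrainingArguments: a float < 1 passed to `warmup_steps`
# is treated as a ratio of total training steps (the old `warmup_ratio` behavior),
# while a value >= 1 is used as an absolute step count.
args = TrainingArguments(output_dir="trainer_output", warmup_steps=0.05)

# get_warmup_steps() resolves the ratio against the run length:
# math.ceil(1000 * 0.05) == 50 warmup steps for a 1000-step run.
print(args.get_warmup_steps(num_training_steps=1000))  # 50

# Passing the deprecated `warmup_ratio` still works in this patch, but it logs a
# deprecation warning and its value is copied into `warmup_steps` by __post_init__.
```

Keeping a single knob for warmup length is also why the task docs earlier in this patch switch `warmup_ratio=0.1` to `warmup_steps=0.1` rather than dropping warmup altogether.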
@@ -2779,11 +2774,9 @@ def set_lr_scheduler( If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. - warmup_ratio (`float`, *optional*, defaults to 0.0): - Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. - warmup_steps (`int`, *optional*, defaults to 0): - Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of - `warmup_ratio`. + warmup_steps (`float`, *optional*, defaults to 0): + Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. + If smaller than 1, will be interpreted as ratio of steps used for a linear warmup from 0 to `learning_rate`. Example: @@ -2791,15 +2784,18 @@ def set_lr_scheduler( >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") - >>> args = args.set_lr_scheduler(name="cosine", warmup_ratio=0.05) - >>> args.warmup_ratio + >>> args = args.set_lr_scheduler(name="cosine", warmup_steps=0.05) + >>> args.warmup_steps 0.05 ``` """ + if warmup_ratio is not None: + logger.warning("warmup_ratio is deprecated and will be removed in v5. Use `warmup_steps` instead.") + warmup_steps = warmup_ratio + self.lr_scheduler_type = SchedulerType(name) self.num_train_epochs = num_epochs self.max_steps = max_steps - self.warmup_ratio = warmup_ratio self.warmup_steps = warmup_steps return self From f0ebcf1f06cbe1ad765731fc4e900974c4ba7314 Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Fri, 3 Oct 2025 16:20:23 +0000 Subject: [PATCH 4/8] better --- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 48ca4fe57f02..1a9c8cb77625 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -2262,7 +2262,7 @@ def get_warmup_steps(self, num_training_steps: int): Get number of steps used for a linear warmup. """ warmup_steps = ( - self.warmup_steps if self.warmup_steps >= 1 else math.ceil(num_training_steps * self.warmup_steps) + int(self.warmup_steps) if self.warmup_steps >= 1 else math.ceil(num_training_steps * self.warmup_steps) ) return warmup_steps From 63db46fa1bb6c376fccefa8779914709a476117b Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Fri, 3 Oct 2025 16:23:11 +0000 Subject: [PATCH 5/8] fix --- src/transformers/training_args.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 1a9c8cb77625..7cc558ce4cfa 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -298,7 +298,7 @@ class TrainingArguments: lr_scheduler_kwargs ('dict', *optional*, defaults to {}): The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. warmup_steps (`int` or `float`, *optional*, defaults to 0): - Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. + Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of steps used for a linear warmup from 0 to `learning_rate`. log_level (`str`, *optional*, defaults to `passive`): Logger log level to use on the main process. 
Possible choices are the log levels as strings: 'debug', @@ -880,7 +880,11 @@ class TrainingArguments: }, ) warmup_ratio: Optional[float] = field( - default=None, metadata={"help": "This argument is deprecated and will be removed in v5. Use `warmup_steps` instead as it also works with float values."}) + default=None, + metadata={ + "help": "This argument is deprecated and will be removed in v5. Use `warmup_steps` instead as it also works with float values." + }, + ) warmup_steps: float = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) @@ -1714,7 +1718,7 @@ def __post_init__(self): self.report_to = [] elif not isinstance(self.report_to, list): self.report_to = [self.report_to] - + if self.warmup_ratio is not None: logger.warning("warmup_ratio is deprecated and will be removed in v5. Use `warmup_steps` instead.") self.warmup_steps = self.warmup_ratio @@ -2759,7 +2763,7 @@ def set_lr_scheduler( num_epochs: float = 3.0, max_steps: int = -1, warmup_steps: float = 0, - warmup_ratio: Optional[float] = None + warmup_ratio: Optional[float] = None, ): """ A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters. @@ -2775,7 +2779,7 @@ def set_lr_scheduler( For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. warmup_steps (`float`, *optional*, defaults to 0): - Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. + Number of steps used for a linear warmup from 0 to `learning_rate`. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of steps used for a linear warmup from 0 to `learning_rate`. Example: From 5d3ac3d7385519946ec91963e118f1523786bf8c Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Mon, 6 Oct 2025 09:31:43 +0000 Subject: [PATCH 6/8] Revert "style" This reverts commit cf4f9e7c4f7837a88eea6eeabf8b4dfe9455f6dc. 
--- examples/pytorch/contrastive-image-text/run_clip.py | 1 + .../image-classification/run_image_classification.py | 1 + examples/pytorch/image-pretraining/run_mae.py | 1 + examples/pytorch/image-pretraining/run_mim.py | 1 + .../instance-segmentation/run_instance_segmentation.py | 1 + examples/pytorch/language-modeling/run_clm.py | 1 + examples/pytorch/language-modeling/run_fim.py | 1 + examples/pytorch/language-modeling/run_mlm.py | 1 + examples/pytorch/language-modeling/run_plm.py | 1 + examples/pytorch/multiple-choice/run_swag.py | 1 + examples/pytorch/object-detection/run_object_detection.py | 3 ++- examples/pytorch/question-answering/run_qa.py | 1 + examples/pytorch/question-answering/run_qa_beam_search.py | 1 + examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../semantic-segmentation/run_semantic_segmentation.py | 1 + .../speech-recognition/run_speech_recognition_ctc.py | 4 ++-- .../run_speech_recognition_ctc_adapter.py | 4 ++-- .../speech-recognition/run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 1 + examples/pytorch/text-classification/run_classification.py | 1 + examples/pytorch/text-classification/run_glue.py | 1 + examples/pytorch/text-classification/run_xnli.py | 1 + examples/pytorch/token-classification/run_ner.py | 1 + examples/pytorch/translation/run_translation.py | 1 + src/transformers/training_args.py | 6 +++++- .../sagemaker/scripts/pytorch/run_glue_model_parallelism.py | 1 + 26 files changed, 33 insertions(+), 8 deletions(-) diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 5f1256813d8c..51b31edb92d0 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -55,6 +55,7 @@ TrainingArguments, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 04107ffdfb3e..6da5876fd0ca 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -58,6 +58,7 @@ TrainingArguments, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index ea350a855feb..14da341177bb 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -41,6 +41,7 @@ ViTMAEConfig, ViTMAEForPreTraining, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index c0b86d009ba8..6a17d4242e17 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -44,6 +44,7 @@ Trainer, TrainingArguments, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git 
a/examples/pytorch/instance-segmentation/run_instance_segmentation.py b/examples/pytorch/instance-segmentation/run_instance_segmentation.py index 8e6342a31607..5d9e992c4b9e 100644 --- a/examples/pytorch/instance-segmentation/run_instance_segmentation.py +++ b/examples/pytorch/instance-segmentation/run_instance_segmentation.py @@ -49,6 +49,7 @@ ) from transformers.image_processing_utils import BatchFeature from transformers.trainer import EvalPrediction +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 986f873870dc..eda38a2e0d39 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -63,6 +63,7 @@ set_seed, ) from transformers.testing_utils import CaptureLogger +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_fim.py b/examples/pytorch/language-modeling/run_fim.py index 14457275811d..4e309e555e0d 100644 --- a/examples/pytorch/language-modeling/run_fim.py +++ b/examples/pytorch/language-modeling/run_fim.py @@ -66,6 +66,7 @@ ) from transformers.integrations import is_deepspeed_zero3_enabled from transformers.testing_utils import CaptureLogger +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 80ad49bef983..9e9a2bd680a7 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -62,6 +62,7 @@ is_torch_xla_available, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index f81fd37d5e40..b0df1dfb9bc7 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -55,6 +55,7 @@ XLNetLMHeadModel, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index d1f785d51ca1..7933b7f968e9 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -52,6 +52,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version diff --git a/examples/pytorch/object-detection/run_object_detection.py b/examples/pytorch/object-detection/run_object_detection.py index e2018bf97226..64d988e2bd98 100644 --- a/examples/pytorch/object-detection/run_object_detection.py +++ b/examples/pytorch/object-detection/run_object_detection.py @@ -51,6 +51,7 @@ from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.trainer import EvalPrediction +from transformers.trainer_utils import 
get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -492,7 +493,7 @@ def main(): # Training if training_args.do_train: - train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index ab5bf3267b74..21fe714c7233 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -43,6 +43,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 882f4fa9b272..8b31b20fee2f 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -41,6 +41,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 0c0722510933..738ca3b610e4 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -39,7 +39,7 @@ Seq2SeqTrainingArguments, set_seed, ) -from transformers.trainer_utils import EvalLoopOutput, EvalPrediction +from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index 6fd83df7aa8f..b9642e0454d6 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -52,6 +52,7 @@ TrainingArguments, default_data_collator, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index f94105cd1d10..b32a63061abe 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -55,7 +55,7 @@ Wav2Vec2Processor, set_seed, ) -from transformers.trainer_utils import is_main_process +from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -766,7 +766,7 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - if os.path.isdir(model_args.model_name_or_path): + elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git 
a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index adfe91a31d5f..2e7ac34ddc56 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -58,7 +58,7 @@ set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import WAV2VEC2_ADAPTER_SAFE_FILE -from transformers.trainer_utils import is_main_process +from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -758,7 +758,7 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist - if os.path.isdir(model_args.model_name_or_path): + elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 7e124d4326f3..77df3872e771 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -54,7 +54,7 @@ Seq2SeqTrainingArguments, set_seed, ) -from transformers.trainer_utils import is_main_process +from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 7a3cf76188f8..2ab7ff3d0759 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -61,6 +61,7 @@ Seq2SeqTrainingArguments, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, is_offline_mode from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index 78a9d0094911..bf62b52e355c 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -55,6 +55,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index cc2e6e1fe59e..101dff28e986 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -57,6 +57,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index b64ac29552f1..8a57d101e997 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -56,6 +56,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils 
import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 22ab28e651a7..19b8b59ac36a 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -54,6 +54,7 @@ TrainingArguments, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index b068cd3ce2de..da3cb9bdc1ec 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -60,6 +60,7 @@ default_data_collator, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 7cc558ce4cfa..55b7c4fc2a29 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -805,7 +805,11 @@ class TrainingArguments: ) overwrite_output_dir: bool = field( default=False, - metadata={"help": ("This argument is deprecated and will be removed in v5.")}, + metadata={ + "help": ( + "This argument is deprecated and will be removed in v5." + ) + }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index c07ad30d7f29..f5a259792518 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -38,6 +38,7 @@ set_seed, ) from transformers.trainer import Trainer +from transformers.trainer_utils import get_last_checkpoint from transformers.training_args import TrainingArguments from transformers.utils import check_min_version From f3cdb00aae2410df41f5c64dd6a7c159002c38d6 Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Mon, 6 Oct 2025 09:32:40 +0000 Subject: [PATCH 7/8] Revert "dep" This reverts commit 1800beb13f407ddb881d0af936860643e84ba085. 
--- ISSUES.md | 2 +- docs/source/ar/run_scripts.md | 26 +++++++++++++++++ docs/source/ar/trainer.md | 2 ++ docs/source/de/run_scripts.md | 26 +++++++++++++++++ docs/source/en/deepspeed.md | 4 +-- docs/source/en/run_scripts.md | 7 ++++- docs/source/en/trainer.md | 3 +- docs/source/es/run_scripts.md | 26 +++++++++++++++++ docs/source/es/trainer.md | 2 ++ docs/source/fr/run_scripts_fr.md | 28 ++++++++++++++++++- docs/source/it/run_scripts.md | 26 +++++++++++++++++ docs/source/ja/main_classes/deepspeed.md | 6 ++-- docs/source/ja/main_classes/trainer.md | 3 ++ docs/source/ja/run_scripts.md | 27 ++++++++++++++++++ docs/source/ko/deepspeed.md | 4 +-- docs/source/ko/perf_train_special.md | 3 +- docs/source/ko/run_scripts.md | 26 +++++++++++++++++ docs/source/ko/trainer.md | 2 ++ docs/source/pt/run_scripts.md | 26 +++++++++++++++++ docs/source/zh/main_classes/deepspeed.md | 6 ++-- docs/source/zh/main_classes/trainer.md | 3 ++ docs/source/zh/perf_train_special.md | 1 + docs/source/zh/run_scripts.md | 27 ++++++++++++++++++ .../multiple_choice/run_multiple_choice.py | 11 ++++++++ .../legacy/question-answering/run_squad.py | 15 ++++++++++ .../question-answering/run_squad_trainer.py | 11 ++++++++ examples/legacy/run_language_modeling.py | 10 +++++++ examples/legacy/run_swag.py | 15 ++++++++++ examples/legacy/seq2seq/finetune_trainer.py | 3 ++ .../seq2seq/train_distil_marian_enro.sh | 2 +- .../seq2seq/train_distil_marian_enro_tpu.sh | 2 +- .../legacy/seq2seq/train_distilbart_cnn.sh | 2 +- .../legacy/seq2seq/train_mbart_cc25_enro.sh | 2 +- examples/legacy/seq2seq/utils.py | 24 ++++++++++++++++ .../legacy/token-classification/run_ner.py | 11 ++++++++ examples/pytorch/README.md | 3 +- .../pytorch/audio-classification/README.md | 2 ++ .../run_audio_classification.py | 18 ++++++++++++ .../pytorch/contrastive-image-text/README.md | 1 + .../contrastive-image-text/run_clip.py | 17 +++++++++++ .../run_image_classification.py | 17 +++++++++++ examples/pytorch/image-pretraining/README.md | 2 ++ examples/pytorch/image-pretraining/run_mae.py | 17 +++++++++++ examples/pytorch/image-pretraining/run_mim.py | 17 +++++++++++ .../run_instance_segmentation.py | 12 ++++++++ examples/pytorch/language-modeling/run_clm.py | 17 +++++++++++ examples/pytorch/language-modeling/run_fim.py | 17 +++++++++++ examples/pytorch/language-modeling/run_mlm.py | 17 +++++++++++ examples/pytorch/language-modeling/run_plm.py | 17 +++++++++++ examples/pytorch/multiple-choice/run_swag.py | 17 +++++++++++ .../object-detection/run_object_detection.py | 17 +++++++++++ examples/pytorch/old_test_xla_examples.py | 1 + examples/pytorch/question-answering/run_qa.py | 17 +++++++++++ .../question-answering/run_qa_beam_search.py | 17 +++++++++++ .../question-answering/run_seq2seq_qa.py | 17 +++++++++++ .../run_semantic_segmentation.py | 17 +++++++++++ examples/pytorch/speech-recognition/README.md | 7 +++++ .../run_speech_recognition_ctc.py | 19 ++++++++++++- .../run_speech_recognition_ctc_adapter.py | 19 ++++++++++++- .../run_speech_recognition_seq2seq.py | 17 +++++++++++ examples/pytorch/summarization/README.md | 2 ++ .../summarization/run_summarization.py | 17 +++++++++++ examples/pytorch/test_pytorch_examples.py | 17 +++++++++++ .../text-classification/run_classification.py | 17 +++++++++++ .../pytorch/text-classification/run_glue.py | 17 +++++++++++ .../pytorch/text-classification/run_xnli.py | 17 +++++++++++ .../pytorch/token-classification/run_ner.py | 17 +++++++++++ examples/pytorch/translation/README.md | 5 ++++ 
.../pytorch/translation/run_translation.py | 17 +++++++++++ .../integrations/integration_utils.py | 2 +- src/transformers/training_args.py | 6 +++- .../run_{{cookiecutter.example_shortcut}}.py | 23 +++++++++++++-- tests/deepspeed/test_deepspeed.py | 3 ++ tests/deepspeed/test_model_zoo.py | 1 + tests/extended/test_trainer_ext.py | 1 + tests/fsdp/test_fsdp.py | 1 + tests/sagemaker/conftest.py | 1 + .../pytorch/run_glue_model_parallelism.py | 19 ++++++++++++- tests/trainer/test_trainer.py | 1 + 79 files changed, 890 insertions(+), 27 deletions(-) diff --git a/ISSUES.md b/ISSUES.md index 77de2998ad0b..c87bd9fc2c3f 100644 --- a/ISSUES.md +++ b/ISSUES.md @@ -153,7 +153,7 @@ You are not required to read the following guidelines before opening an issue. H cd examples/seq2seq torchrun --nproc_per_node=2 ./finetune_trainer.py \ --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \ - --output_dir output_dir \ + --output_dir output_dir --overwrite_output_dir \ --do_train --n_train 500 --num_train_epochs 1 \ --per_device_train_batch_size 1 --freeze_embeds \ --src_lang en_XX --tgt_lang ro_RO --task translation \ diff --git a/docs/source/ar/run_scripts.md b/docs/source/ar/run_scripts.md index 052e6e1d8440..238844dc055e 100644 --- a/docs/source/ar/run_scripts.md +++ b/docs/source/ar/run_scripts.md @@ -93,6 +93,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -116,6 +117,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -138,6 +140,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -194,6 +197,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -221,6 +225,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -234,6 +239,25 @@ examples/pytorch/summarization/run_summarization.py -h خيار آخر مفيد لتمكينه هو استئناف التدريب من نقطة تفتيش سابقة. سيضمن ذلك أنك تستطيع الاستمرار من حيث توقفت دون البدء من جديد إذا تم مقاطعة تدريبك. هناك طريقتان لاستئناف التدريب من نقطة تفتيش. +تستخدم الطريقة الأولى المعلمة `output_dir previous_output_dir` لاستئناف التدريب من أحدث نقطة تفتيش مخزنة في `output_dir`. في هذه الحالة، يجب عليك إزالة `overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +تستخدم الطريقة الثانية معلمة `resume_from_checkpoint path_to_specific_checkpoint` لاستئناف التدريب من مجلد نقطة تفتيش محددة. 
+ ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -245,6 +269,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -276,5 +301,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ar/trainer.md b/docs/source/ar/trainer.md index be9e44cfa620..1784d76a4ecb 100644 --- a/docs/source/ar/trainer.md +++ b/docs/source/ar/trainer.md @@ -611,6 +611,7 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` يمكنك أيضًا تحديد المعلمات من ملف `config_file.yaml` مباشرة في سطر الأوامر: @@ -633,6 +634,7 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` اطلع على برنامج تعليمي [Launching your Accelerate scripts](https://huggingface.co/docs/accelerate/basic_tutorials/launch) لمعرفة المزيد حول `accelerate_launch` والتكوينات المخصصة. diff --git a/docs/source/de/run_scripts.md b/docs/source/de/run_scripts.md index 833d886c7e81..004f67291979 100644 --- a/docs/source/de/run_scripts.md +++ b/docs/source/de/run_scripts.md @@ -98,6 +98,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -121,6 +122,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -142,6 +144,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -198,6 +201,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -225,6 +229,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -238,6 +243,25 @@ examples/pytorch/summarization/run_summarization.py -h Eine weitere hilfreiche Option, die Sie aktivieren können, ist die Wiederaufnahme des Trainings von einem früheren Kontrollpunkt aus. Auf diese Weise können Sie im Falle einer Unterbrechung Ihres Trainings dort weitermachen, wo Sie aufgehört haben, ohne von vorne beginnen zu müssen. Es gibt zwei Methoden, um das Training von einem Kontrollpunkt aus wieder aufzunehmen. +Die erste Methode verwendet das Argument `output_dir previous_output_dir`, um das Training ab dem letzten in `output_dir` gespeicherten Kontrollpunkt wieder aufzunehmen. 
In diesem Fall sollten Sie `overwrite_output_dir` entfernen: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +Die zweite Methode verwendet das Argument `Resume_from_checkpoint path_to_specific_checkpoint`, um das Training ab einem bestimmten Checkpoint-Ordner wieder aufzunehmen. + ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -249,6 +273,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -280,5 +305,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/en/deepspeed.md b/docs/source/en/deepspeed.md index 7971854011ee..55b8408a43bb 100644 --- a/docs/source/en/deepspeed.md +++ b/docs/source/en/deepspeed.md @@ -593,7 +593,7 @@ To deploy DeepSpeed on multiple GPUs, add `--num_gpus`. You don't need to add `- deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -616,7 +616,7 @@ To deploy DeepSpeed on a single GPU, add `--num_gpus`. You don't need to add `-- deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro diff --git a/docs/source/en/run_scripts.md b/docs/source/en/run_scripts.md index 74473b5228e3..594eb84b02a1 100644 --- a/docs/source/en/run_scripts.md +++ b/docs/source/en/run_scripts.md @@ -61,8 +61,9 @@ The example below fine-tunes [T5-small](https://huggingface.co/google-t5/t5-smal The example script downloads and preprocesses a dataset, and then fine-tunes it with [`Trainer`] with a supported model architecture. -Resuming training from a checkpoint is very useful if training is interrupted because you don't have to start over again: +Resuming training from a checkpoint is very useful if training is interrupted because you don't have to start over again. There are two ways to resume training from a checkpoint. +* `--output dir previous_output_dir` resumes training from the latest checkpoint stored in `output_dir`. Remove the `--overwrite_output_dir` parameter if you're using this method. * `--resume_from_checkpoint path_to_specific_checkpoint` resumes training from a specific checkpoint folder. 
Share your model on the [Hub](https://huggingface.co/) with the `--push_to_hub` parameter. It creates a repository and uploads the model to the folder name specified in `--output_dir`. You could also use the `--push_to_hub_model_id` parameter to specify the repository name. @@ -84,6 +85,9 @@ python examples/pytorch/summarization/run_summarization.py \ --per_device_eval_batch_size=4 \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ + # remove if using `output_dir previous_output_dir` + # --overwrite_output_dir \ + --output_dir previous_output_dir \ # --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate \ ``` @@ -164,6 +168,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate \ diff --git a/docs/source/en/trainer.md b/docs/source/en/trainer.md index 98b23a3e7b94..32f14bc41da3 100644 --- a/docs/source/en/trainer.md +++ b/docs/source/en/trainer.md @@ -361,7 +361,8 @@ accelerate launch \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ + --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` > [!TIP] diff --git a/docs/source/es/run_scripts.md b/docs/source/es/run_scripts.md index 6db78af2ce5e..462eb5bc3034 100644 --- a/docs/source/es/run_scripts.md +++ b/docs/source/es/run_scripts.md @@ -98,6 +98,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -121,6 +122,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -142,6 +144,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -198,6 +201,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -225,6 +229,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -238,6 +243,25 @@ examples/pytorch/summarization/run_summarization.py -h Otra opción útil para habilitar es reanudar el entrenamiento desde un punto de control anterior. Esto asegurará que puedas continuar donde lo dejaste sin comenzar de nuevo si tu entrenamiento se interrumpe. Hay dos métodos para reanudar el entrenamiento desde un punto de control. +El primer método utiliza el argumento `output_dir previous_output_dir` para reanudar el entrenamiento desde el último punto de control almacenado en `output_dir`. 
En este caso, debes eliminar `overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +El segundo método utiliza el argumento `resume_from_checkpoint path_to_specific_checkpoint` para reanudar el entrenamiento desde una carpeta de punto de control específica. + ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -249,6 +273,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -280,5 +305,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/es/trainer.md b/docs/source/es/trainer.md index 335ec54b39bd..4455521f5317 100644 --- a/docs/source/es/trainer.md +++ b/docs/source/es/trainer.md @@ -381,6 +381,7 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` También puedes especificar los parámetros del archivo config_file.yaml directamente en la línea de comandos: @@ -403,6 +404,7 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` Consulta el tutorial [Lanzamiento de tus scripts con Accelerate](https://huggingface.co/docs/accelerate/basic_tutorials/launch) para obtener más información sobre `accelerate_launch` y las configuraciones personalizadas. 
diff --git a/docs/source/fr/run_scripts_fr.md b/docs/source/fr/run_scripts_fr.md index 43e3ed024150..1acf683253da 100644 --- a/docs/source/fr/run_scripts_fr.md +++ b/docs/source/fr/run_scripts_fr.md @@ -100,6 +100,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -123,6 +124,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -145,6 +147,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -201,6 +204,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -227,6 +231,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -238,7 +243,26 @@ examples/pytorch/summarization/run_summarization.py -h ## Reprendre l'entraînement à partir d'un point de contrôle -Une autre option utile est de reprendre l'entraînement à partir d'un point de contrôle précédent. Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu: +Une autre option utile est de reprendre l'entraînement à partir d'un point de contrôle précédent. Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu. Il existe deux méthodes pour reprendre l'entraînement à partir d'un point de contrôle. + +La première méthode utilise l'argument `output_dir previous_output_dir` pour reprendre l'entraînement à partir du dernier point de contrôle stocké dans `output_dir`. Dans ce cas, vous devez supprimer l'argument `overwrite_output_dir`. + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +La seconde méthode utilise l'argument `resume_from_checkpoint path_to_specific_checkpoint` pour reprendre l'entraînement à partir d'un dossier de point de contrôle spécifique. 
```bash python examples/pytorch/summarization/run_summarization.py @@ -251,6 +275,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -283,5 +308,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/it/run_scripts.md b/docs/source/it/run_scripts.md index e0986b839771..ad7df423cb96 100644 --- a/docs/source/it/run_scripts.md +++ b/docs/source/it/run_scripts.md @@ -98,6 +98,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -121,6 +122,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -142,6 +144,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -198,6 +201,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -225,6 +229,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -238,6 +243,25 @@ examples/pytorch/summarization/run_summarization.py -h Un'altra utile opzione è riavviare un addestramento da un checkpoint precedente. Questo garantirà che tu possa riprendere da dove hai interrotto senza ricominciare se l'addestramento viene interrotto. Ci sono due metodi per riavviare l'addestramento da un checkpoint: +Il primo metodo usa l'argomento `output_dir previous_output_dir` per riavviare l'addestramento dall'ultima versione del checkpoint contenuto in `output_dir`. In questo caso, dovresti rimuovere `overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +Il secondo metodo usa l'argomento `resume_from_checkpoint path_to_specific_checkpoint` per riavviare un addestramento da una specifica cartella di checkpoint. 
+ ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -249,6 +273,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -280,5 +305,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ja/main_classes/deepspeed.md b/docs/source/ja/main_classes/deepspeed.md index 84d2bfd492a1..aaa8191621bc 100644 --- a/docs/source/ja/main_classes/deepspeed.md +++ b/docs/source/ja/main_classes/deepspeed.md @@ -188,7 +188,7 @@ deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.js deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -211,7 +211,7 @@ DeepSpeed 関連の引数が 2 つありますが、簡単にするためであ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -1788,7 +1788,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ --model_name_or_path google-t5/t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ ---per_device_eval_batch_size 4 \ +--overwrite_output_dir --per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config "ro-en" --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix "translate English to Romanian: " diff --git a/docs/source/ja/main_classes/trainer.md b/docs/source/ja/main_classes/trainer.md index e5d55ff77b4c..e6e6e28d308b 100644 --- a/docs/source/ja/main_classes/trainer.md +++ b/docs/source/ja/main_classes/trainer.md @@ -534,6 +534,7 @@ python examples/pytorch/text-classification/run_glue.py \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` **注意すべきいくつかの注意事項** @@ -668,6 +669,7 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir ``` 4. 
`accelerate launch`するための cmd 引数を直接使用することもできます。上の例は次のようにマッピングされます。 @@ -692,6 +694,7 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir ``` 詳細については、🤗 Accelerate CLI ガイドを参照してください: [🤗 Accelerate スクリプトの起動](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 diff --git a/docs/source/ja/run_scripts.md b/docs/source/ja/run_scripts.md index bf0ed8627024..ee738e3e4313 100644 --- a/docs/source/ja/run_scripts.md +++ b/docs/source/ja/run_scripts.md @@ -104,6 +104,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -130,6 +131,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -151,6 +153,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -209,6 +212,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -236,6 +240,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -249,6 +254,26 @@ examples/pytorch/summarization/run_summarization.py -h 以前のチェックポイントからトレーニングを再開するための役立つオプションもあります。これにより、トレーニングが中断された場合でも、最初からやり直すことなく、中断したところから再開できます。チェックポイントからトレーニングを再開するための2つの方法があります。 +最初の方法は、`output_dir previous_output_dir` 引数を使用して、`output_dir` に保存された最新のチェックポイントからトレーニングを再開する方法です。この場合、`overwrite_output_dir` を削除する必要があります: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +2番目の方法では、`resume_from_checkpoint path_to_specific_checkpoint` 引数を使用して、特定のチェックポイントフォルダからトレーニングを再開します。 + + ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -260,6 +285,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -293,6 +319,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ko/deepspeed.md b/docs/source/ko/deepspeed.md index 823865168be4..d0955ee3db80 100644 --- a/docs/source/ko/deepspeed.md +++ b/docs/source/ko/deepspeed.md @@ -590,7 +590,7 @@ bf16은 설정 파일에서 설정하거나 다음 인수를 전달하면 명령 deepspeed --num_gpus=2 
examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -605,7 +605,7 @@ deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro diff --git a/docs/source/ko/perf_train_special.md b/docs/source/ko/perf_train_special.md index 7927f3748f07..188db542f7c0 100644 --- a/docs/source/ko/perf_train_special.md +++ b/docs/source/ko/perf_train_special.md @@ -54,7 +54,8 @@ python examples/pytorch/text-classification/run_glue.py \ --per_device_train_batch_size 32 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ + --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` `gloco`와 `nccl`과 같은 [분산 학습 백엔드](https://pytorch.org/docs/stable/distributed.html#backends)는 `mps` 장치에서 지원되지 않으므로, MPS 백엔드에서는 단일 GPU로만 학습이 가능합니다. diff --git a/docs/source/ko/run_scripts.md b/docs/source/ko/run_scripts.md index 2db8fe16c600..874834a1f32a 100644 --- a/docs/source/ko/run_scripts.md +++ b/docs/source/ko/run_scripts.md @@ -106,6 +106,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -130,6 +131,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -154,6 +156,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -213,6 +216,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -241,6 +245,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -257,6 +262,8 @@ examples/pytorch/summarization/run_summarization.py -h 이렇게 하면 훈련이 중단되더라도 처음부터 다시 시작하지 않고 중단한 부분부터 다시 시작할 수 있습니다. 체크포인트에서 훈련을 재개하는 방법에는 두 가지가 있습니다. +첫 번째는 `output_dir previous_output_dir` 인수를 사용하여 `output_dir`에 저장된 최신 체크포인트부터 훈련을 재개하는 방법입니다. 
+이 경우 `overwrite_output_dir`을 제거해야 합니다: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -268,6 +275,24 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +두 번째는 `resume_from_checkpoint path_to_specific_checkpoint` 인수를 사용하여 특정 체크포인트 폴더에서 훈련을 재개하는 방법입니다. + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -300,5 +325,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/ko/trainer.md b/docs/source/ko/trainer.md index 65c3fbef982f..d753627c86fb 100644 --- a/docs/source/ko/trainer.md +++ b/docs/source/ko/trainer.md @@ -505,6 +505,7 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` `config_file.yaml` 파일의 매개변수를 직접 지정할 수도 있습니다: @@ -527,6 +528,7 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` `accelerate_launch`와 사용자 정의 구성에 대해 더 알아보려면 [Accelerate 스크립트 실행](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 튜토리얼을 확인하세요. \ No newline at end of file diff --git a/docs/source/pt/run_scripts.md b/docs/source/pt/run_scripts.md index 72060f98571d..4b4baf18988f 100644 --- a/docs/source/pt/run_scripts.md +++ b/docs/source/pt/run_scripts.md @@ -99,6 +99,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -122,6 +123,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -143,6 +145,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -200,6 +203,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -227,6 +231,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -240,6 +245,25 @@ examples/pytorch/summarization/run_summarization.py -h Outra opção útil para habilitar é retomar o treinamento de um checkpoint anterior. Isso garantirá que você possa continuar de onde parou sem recomeçar se o seu treinamento for interrompido. 
Existem dois métodos para retomar o treinamento a partir de um checkpoint. +O primeiro método usa o argumento `output_dir previous_output_dir` para retomar o treinamento do último checkpoint armazenado em `output_dir`. Neste caso, você deve remover `overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +O segundo método usa o argumento `resume_from_checkpoint path_to_specific_checkpoint` para retomar o treinamento de uma pasta de checkpoint específica. + ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -251,6 +275,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -282,5 +307,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/docs/source/zh/main_classes/deepspeed.md b/docs/source/zh/main_classes/deepspeed.md index 8cd047ed5a0a..2e27e04cae45 100644 --- a/docs/source/zh/main_classes/deepspeed.md +++ b/docs/source/zh/main_classes/deepspeed.md @@ -179,7 +179,7 @@ deepspeed --num_gpus=2 your_program.py --deepspeed ds_config.js deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -202,7 +202,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ ---output_dir output_dir --fp16 \ +--output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro @@ -1659,7 +1659,7 @@ deepspeed examples/pytorch/translation/run_translation.py \ --model_name_or_path google-t5/t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ ---per_device_eval_batch_size 4 \ +--overwrite_output_dir --per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config "ro-en" --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix "translate English to Romanian: " diff --git a/docs/source/zh/main_classes/trainer.md b/docs/source/zh/main_classes/trainer.md index 5d587fd6d575..159477fe64a0 100644 --- a/docs/source/zh/main_classes/trainer.md +++ b/docs/source/zh/main_classes/trainer.md @@ -471,6 +471,7 @@ python examples/pytorch/text-classification/run_glue.py \ 
--learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` **需要注意的一些注意事项** @@ -605,6 +606,7 @@ accelerate launch \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir ``` 4. 你也可以直接使用`accelerate launch`的cmd参数。上面的示例将映射到: @@ -629,6 +631,7 @@ accelerate launch --num_processes=2 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ +--overwrite_output_dir ``` 有关更多信息,请参阅 🤗 Accelerate CLI 指南:[启动您的 🤗 Accelerate 脚本](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 diff --git a/docs/source/zh/perf_train_special.md b/docs/source/zh/perf_train_special.md index 8e7929ecb1cd..ee8553475679 100644 --- a/docs/source/zh/perf_train_special.md +++ b/docs/source/zh/perf_train_special.md @@ -50,6 +50,7 @@ python examples/pytorch/text-classification/run_glue.py \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ + --overwrite_output_dir ``` 用于[分布式设置](https://pytorch.org/docs/stable/distributed.html#backends)的后端(如`gloo`和`nccl`)不支持`mps`设备,这也意味着使用 MPS 后端时只能在单个 GPU 上进行训练。 diff --git a/docs/source/zh/run_scripts.md b/docs/source/zh/run_scripts.md index 60e78fcb9e9b..32bf2342f9aa 100644 --- a/docs/source/zh/run_scripts.md +++ b/docs/source/zh/run_scripts.md @@ -99,6 +99,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -123,6 +124,7 @@ torchrun \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -143,6 +145,7 @@ python xla_spawn.py --num_cores 8 \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -199,6 +202,7 @@ python examples/pytorch/summarization/run_summarization.py \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate @@ -227,6 +231,7 @@ python examples/pytorch/summarization/run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -240,6 +245,26 @@ examples/pytorch/summarization/run_summarization.py -h 另一个有用的选项是从之前的checkpoint恢复训练。这将确保在训练中断时,您可以从之前停止的地方继续进行,而无需重新开始。有两种方法可以从checkpoint恢复训练。 +第一种方法使用`output_dir previous_output_dir`参数从存储在`output_dir`中的最新的checkpoint恢复训练。在这种情况下,您应该删除`overwrite_output_dir`: + +```bash +python examples/pytorch/summarization/run_summarization.py + --model_name_or_path google-t5/t5-small \ + --do_train \ + --do_eval \ + --dataset_name cnn_dailymail \ + --dataset_config "3.0.0" \ + --source_prefix "summarize: " \ + --output_dir /tmp/tst-summarization \ + --per_device_train_batch_size=4 \ + --per_device_eval_batch_size=4 \ + --output_dir previous_output_dir \ + --predict_with_generate +``` + +第二种方法使用`resume_from_checkpoint path_to_specific_checkpoint`参数从特定的checkpoint文件夹恢复训练。 + + ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ @@ -251,6 +276,7 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 
\ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` @@ -283,5 +309,6 @@ python examples/pytorch/summarization/run_summarization.py --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/examples/legacy/multiple_choice/run_multiple_choice.py b/examples/legacy/multiple_choice/run_multiple_choice.py index 92947e2092cf..aa1297656a90 100644 --- a/examples/legacy/multiple_choice/run_multiple_choice.py +++ b/examples/legacy/multiple_choice/run_multiple_choice.py @@ -95,6 +95,17 @@ def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" + " --overwrite_output_dir to overcome." + ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/question-answering/run_squad.py b/examples/legacy/question-answering/run_squad.py index 126fa197ee27..39ba14a12afa 100644 --- a/examples/legacy/question-answering/run_squad.py +++ b/examples/legacy/question-answering/run_squad.py @@ -641,6 +641,9 @@ def main(): help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") + parser.add_argument( + "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" + ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) @@ -674,6 +677,18 @@ def main(): "stride or increase the maximum length to ensure the features are correctly built." ) + if ( + os.path.exists(args.output_dir) + and os.listdir(args.output_dir) + and args.do_train + and not args.overwrite_output_dir + ): + raise ValueError( + "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( + args.output_dir + ) + ) + # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script diff --git a/examples/legacy/question-answering/run_squad_trainer.py b/examples/legacy/question-answering/run_squad_trainer.py index 5288e3019b9a..d3730d1bc0ba 100644 --- a/examples/legacy/question-answering/run_squad_trainer.py +++ b/examples/legacy/question-answering/run_squad_trainer.py @@ -76,6 +76,17 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" + " --overwrite_output_dir to overcome." 
+ ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/run_language_modeling.py b/examples/legacy/run_language_modeling.py index 64c92fa205e0..8a6b8eded34a 100755 --- a/examples/legacy/run_language_modeling.py +++ b/examples/legacy/run_language_modeling.py @@ -207,6 +207,16 @@ def main(): "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " "or remove the --do_eval argument." ) + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" + " --overwrite_output_dir to overcome." + ) # Setup logging logging.basicConfig( diff --git a/examples/legacy/run_swag.py b/examples/legacy/run_swag.py index 8c80cf8a347a..221f9cc9c98d 100755 --- a/examples/legacy/run_swag.py +++ b/examples/legacy/run_swag.py @@ -557,6 +557,9 @@ def main(): help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") + parser.add_argument( + "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" + ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) @@ -581,6 +584,18 @@ def main(): parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() + if ( + os.path.exists(args.output_dir) + and os.listdir(args.output_dir) + and args.do_train + and not args.overwrite_output_dir + ): + raise ValueError( + "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( + args.output_dir + ) + ) + # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script diff --git a/examples/legacy/seq2seq/finetune_trainer.py b/examples/legacy/seq2seq/finetune_trainer.py index 54ca2c898c82..44f5a75eda0a 100755 --- a/examples/legacy/seq2seq/finetune_trainer.py +++ b/examples/legacy/seq2seq/finetune_trainer.py @@ -39,6 +39,7 @@ Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, + check_output_dir, freeze_embeds, freeze_params, lmap, @@ -167,6 +168,8 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + check_output_dir(training_args) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", diff --git a/examples/legacy/seq2seq/train_distil_marian_enro.sh b/examples/legacy/seq2seq/train_distil_marian_enro.sh index d0e1075bb079..5e86a6991c57 100644 --- a/examples/legacy/seq2seq/train_distil_marian_enro.sh +++ b/examples/legacy/seq2seq/train_distil_marian_enro.sh @@ -20,7 +20,7 @@ export MAX_LEN=128 python finetune_trainer.py \ --tokenizer_name $m --model_name_or_path $m \ --data_dir $ENRO_DIR \ - --output_dir marian_en_ro_6_3 \ + --output_dir marian_en_ro_6_3 --overwrite_output_dir \ --learning_rate=3e-4 \ --warmup_steps 500 --sortish_sampler \ --fp16 \ diff --git a/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh b/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh index fcf4ea13698f..00ef67226196 100644 --- a/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh +++ b/examples/legacy/seq2seq/train_distil_marian_enro_tpu.sh @@ -22,7 +22,7 @@ python xla_spawn.py --num_cores $TPU_NUM_CORES \ finetune_trainer.py \ --tokenizer_name $m --model_name_or_path $m \ --data_dir $ENRO_DIR \ - --output_dir marian_en_ro_6_3 \ + --output_dir marian_en_ro_6_3 --overwrite_output_dir \ --learning_rate=3e-4 \ --warmup_steps 500 \ --per_device_train_batch_size=$BS --per_device_eval_batch_size=$BS \ diff --git a/examples/legacy/seq2seq/train_distilbart_cnn.sh b/examples/legacy/seq2seq/train_distilbart_cnn.sh index a490019588ce..42f34e0cb6e7 100644 --- a/examples/legacy/seq2seq/train_distilbart_cnn.sh +++ b/examples/legacy/seq2seq/train_distilbart_cnn.sh @@ -21,7 +21,7 @@ export MAX_TGT_LEN=142 python finetune_trainer.py \ --model_name_or_path $m --tokenizer_name $tok \ --data_dir cnn_dm \ - --output_dir distilbart-cnn-12-6 \ + --output_dir distilbart-cnn-12-6 --overwrite_output_dir \ --learning_rate=3e-5 \ --warmup_steps 500 --sortish_sampler \ --fp16 \ diff --git a/examples/legacy/seq2seq/train_mbart_cc25_enro.sh b/examples/legacy/seq2seq/train_mbart_cc25_enro.sh index fb31790a2c19..63c8051b47de 100644 --- a/examples/legacy/seq2seq/train_mbart_cc25_enro.sh +++ b/examples/legacy/seq2seq/train_mbart_cc25_enro.sh @@ -15,7 +15,7 @@ python finetune_trainer.py \ --model_name_or_path=facebook/mbart-large-cc25 \ --data_dir $ENRO_DIR \ - --output_dir mbart_cc25_enro \ + --output_dir mbart_cc25_enro --overwrite_output_dir \ --learning_rate=3e-5 \ --warmup_steps 500 \ --fp16 \ diff --git a/examples/legacy/seq2seq/utils.py b/examples/legacy/seq2seq/utils.py index 43d66128360d..221b1405aa26 100644 --- a/examples/legacy/seq2seq/utils.py +++ b/examples/legacy/seq2seq/utils.py @@ -639,3 +639,27 @@ def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] + + +def 
check_output_dir(args, expected_items=0): + """ + Checks whether to bail out if output_dir already exists and has more than expected_items in it + + `args`: needs to have the following attributes of `args`: + - output_dir + - do_train + - overwrite_output_dir + + `expected_items`: normally 0 (default) - i.e. empty dir, but in some cases a few files are expected (e.g. recovery from OOM) + """ + if ( + os.path.exists(args.output_dir) + and len(os.listdir(args.output_dir)) > expected_items + and args.do_train + and not args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({args.output_dir}) already exists and " + f"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). " + "Use --overwrite_output_dir to overcome." + ) diff --git a/examples/legacy/token-classification/run_ner.py b/examples/legacy/token-classification/run_ner.py index 1e6e5e402310..69b8a27ac799 100644 --- a/examples/legacy/token-classification/run_ner.py +++ b/examples/legacy/token-classification/run_ner.py @@ -111,6 +111,17 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" + " --overwrite_output_dir to overcome." + ) + module = import_module("tasks") try: token_classification_task_clazz = getattr(module, model_args.task_type) diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index f803f71525e3..18ed7ae8e38b 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -72,7 +72,8 @@ token-classification/run_ner.py -h You can resume training from a previous checkpoint like this: -1. Pass `--resume_from_checkpoint path_to_a_specific_checkpoint` to resume training from that checkpoint folder. +1. Pass `--output_dir previous_output_dir` without `--overwrite_output_dir` to resume training from the latest checkpoint in `output_dir` (what you would use if the training was interrupted, for instance). +2. Pass `--resume_from_checkpoint path_to_a_specific_checkpoint` to resume training from that checkpoint folder. Should you want to turn an example into a notebook where you'd no longer have access to the command line, 🤗 Trainer supports resuming from a checkpoint via `trainer.train(resume_from_checkpoint)`. 
diff --git a/examples/pytorch/audio-classification/README.md b/examples/pytorch/audio-classification/README.md index 8872563716c0..8aacb5fc38e1 100644 --- a/examples/pytorch/audio-classification/README.md +++ b/examples/pytorch/audio-classification/README.md @@ -34,6 +34,7 @@ python run_audio_classification.py \ --dataset_name superb \ --dataset_config_name ks \ --output_dir wav2vec2-base-ft-keyword-spotting \ + --overwrite_output_dir \ --remove_unused_columns False \ --do_train \ --do_eval \ @@ -75,6 +76,7 @@ python run_audio_classification.py \ --audio_column_name audio \ --label_column_name language \ --output_dir wav2vec2-base-lang-id \ + --overwrite_output_dir \ --remove_unused_columns False \ --do_train \ --do_eval \ diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index 3abd6e158d39..bd190e801520 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -47,6 +47,7 @@ TrainingArguments, set_seed, ) +from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version @@ -244,6 +245,21 @@ def main(): # Set seed before initializing model. set_seed(training_args.seed) + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to train from scratch." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Initialize our dataset and prepare it for the audio classification task. raw_datasets = DatasetDict() raw_datasets["train"] = load_dataset( @@ -392,6 +408,8 @@ def compute_metrics(eval_pred): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/contrastive-image-text/README.md b/examples/pytorch/contrastive-image-text/README.md index 9211c0cc59f9..864a31b60372 100644 --- a/examples/pytorch/contrastive-image-text/README.md +++ b/examples/pytorch/contrastive-image-text/README.md @@ -97,5 +97,6 @@ python run_clip.py \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \ + --overwrite_output_dir \ --push_to_hub ``` diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 51b31edb92d0..461062f6849b 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -271,6 +271,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # 3. 
Detecting last checkpoint and eventually continue from last checkpoint + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # 4. Load dataset # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ @@ -482,6 +497,8 @@ def filter_corrupt_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 6da5876fd0ca..8b498b545c45 100755 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -225,6 +225,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -403,6 +418,8 @@ def val_transforms(example_batch): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/image-pretraining/README.md b/examples/pytorch/image-pretraining/README.md index f5a1289efb43..865818f52938 100644 --- a/examples/pytorch/image-pretraining/README.md +++ b/examples/pytorch/image-pretraining/README.md @@ -44,6 +44,7 @@ Alternatively, one can decide to further pre-train an already pre-trained (or fi !python run_mim.py \ --model_type vit \ --output_dir ./outputs/ \ + --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ @@ -94,6 +95,7 @@ Next, we can run the script by providing the path to this custom configuration ( --config_name_or_path path_to_config \ --model_type swin \ --output_dir ./outputs/ \ + --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 14da341177bb..2d92d8ab434d 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -217,6 +217,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Initialize our dataset. ds = load_dataset( data_args.dataset_name, @@ -362,6 +377,8 @@ def preprocess_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 6a17d4242e17..5a636bbad58b 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -281,6 +281,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. 
" + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Initialize our dataset. ds = load_dataset( data_args.dataset_name, @@ -441,6 +456,8 @@ def preprocess_images(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/instance-segmentation/run_instance_segmentation.py b/examples/pytorch/instance-segmentation/run_instance_segmentation.py index 5d9e992c4b9e..ddfd05e0f661 100644 --- a/examples/pytorch/instance-segmentation/run_instance_segmentation.py +++ b/examples/pytorch/instance-segmentation/run_instance_segmentation.py @@ -334,6 +334,18 @@ def find_last_checkpoint(training_args: TrainingArguments) -> Optional[str]: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: + checkpoint = get_last_checkpoint(training_args.output_dir) + if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) return checkpoint diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index eda38a2e0d39..8c677b404630 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -317,6 +317,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -650,6 +665,8 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/language-modeling/run_fim.py b/examples/pytorch/language-modeling/run_fim.py index 4e309e555e0d..134d741f6b6c 100644 --- a/examples/pytorch/language-modeling/run_fim.py +++ b/examples/pytorch/language-modeling/run_fim.py @@ -344,6 +344,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -791,6 +806,8 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 9e9a2bd680a7..9c0bf50ede28 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -290,6 +290,21 @@ def main(): # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -616,6 +631,8 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index b0df1dfb9bc7..86bc31beedf8 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -269,6 +269,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -513,6 +528,8 @@ def group_texts(examples): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 7933b7f968e9..a8679f2b739c 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -213,6 +213,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -386,6 +401,8 @@ def compute_metrics(eval_predictions): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics diff --git a/examples/pytorch/object-detection/run_object_detection.py b/examples/pytorch/object-detection/run_object_detection.py index 64d988e2bd98..ee0bd66cae99 100644 --- a/examples/pytorch/object-detection/run_object_detection.py +++ b/examples/pytorch/object-detection/run_object_detection.py @@ -373,6 +373,23 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: + checkpoint = get_last_checkpoint(training_args.output_dir) + if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # ------------------------------------------------------------------------------------------------ # Load dataset, prepare splits # ------------------------------------------------------------------------------------------------ diff --git a/examples/pytorch/old_test_xla_examples.py b/examples/pytorch/old_test_xla_examples.py index d0bcb1c8478f..b3101aa06b98 100644 --- a/examples/pytorch/old_test_xla_examples.py +++ b/examples/pytorch/old_test_xla_examples.py @@ -55,6 +55,7 @@ def test_run_glue(self): ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} + --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 21fe714c7233..be93a526b803 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -262,6 +262,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
+ ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -631,6 +646,8 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 8b31b20fee2f..4bcf4f9af8c8 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -260,6 +260,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -657,6 +672,8 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 738ca3b610e4..ac3c8ef4ec62 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -307,6 +307,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -654,6 +669,8 @@ def post_processing_function( checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index b9642e0454d6..cc45239f75c0 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -221,6 +221,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. @@ -404,6 +419,8 @@ def preprocess_batch(example_batch, transforms: A.Compose): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) diff --git a/examples/pytorch/speech-recognition/README.md b/examples/pytorch/speech-recognition/README.md index 245954920645..41df41880b5a 100644 --- a/examples/pytorch/speech-recognition/README.md +++ b/examples/pytorch/speech-recognition/README.md @@ -70,6 +70,7 @@ python run_speech_recognition_ctc.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir="./wav2vec2-common_voice-tr-demo" \ + --overwrite_output_dir \ --num_train_epochs="15" \ --per_device_train_batch_size="16" \ --gradient_accumulation_steps="2" \ @@ -105,6 +106,7 @@ torchrun \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir="./wav2vec2-common_voice-tr-demo-dist" \ + --overwrite_output_dir \ --num_train_epochs="15" \ --per_device_train_batch_size="4" \ --learning_rate="3e-4" \ @@ -154,6 +156,7 @@ However, the `--shuffle_buffer_size` argument controls how many examples we can --train_split_name="train+validation" \ --eval_split_name="test" \ --output_dir="wav2vec2-xls-r-common_voice-tr-ft" \ + --overwrite_output_dir \ --max_steps="5000" \ --per_device_train_batch_size="8" \ --gradient_accumulation_steps="2" \ @@ -387,6 +390,7 @@ python run_speech_recognition_seq2seq.py \ --freeze_feature_encoder="False" \ --gradient_checkpointing \ --fp16 \ + --overwrite_output_dir \ --do_train \ --do_eval \ --predict_with_generate \ @@ -427,6 
+431,7 @@ torchrun \ --freeze_feature_encoder="False" \ --gradient_checkpointing \ --fp16 \ + --overwrite_output_dir \ --do_train \ --do_eval \ --predict_with_generate \ @@ -534,6 +539,7 @@ python run_speech_recognition_seq2seq.py \ --output_dir="./" \ --preprocessing_num_workers="16" \ --length_column_name="input_length" \ + --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="8" \ @@ -575,6 +581,7 @@ torchrun \ --output_dir="./" \ --preprocessing_num_workers="16" \ --length_column_name="input_length" \ + --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="8" \ diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index b32a63061abe..c756a6666187 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -429,6 +429,21 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -543,7 +558,7 @@ def remove_special_characters(batch): vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): - if os.path.isfile(vocab_file): + if training_args.overwrite_output_dir and os.path.isfile(vocab_file): try: os.remove(vocab_file) except OSError: @@ -766,6 +781,8 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist + if last_checkpoint is not None: + checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py index 2e7ac34ddc56..aaebf59c8660 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py @@ -406,6 +406,21 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." 
+ ) + elif last_checkpoint is not None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -542,7 +557,7 @@ def remove_special_characters(batch): vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): - if os.path.isfile(vocab_file): + if training_args.overwrite_output_dir and os.path.isfile(vocab_file): try: os.remove(vocab_file) except OSError: @@ -758,6 +773,8 @@ def compute_metrics(pred): # Training if training_args.do_train: # use last checkpoint if exist + if last_checkpoint is not None: + checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 77df3872e771..4b6cda49925b 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -340,6 +340,21 @@ def main(): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) + # 3. Detecting last checkpoint and eventually continue from last checkpoint + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -588,6 +603,8 @@ def compute_metrics(pred): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the feature extractor too for easy upload diff --git a/examples/pytorch/summarization/README.md b/examples/pytorch/summarization/README.md index e47f09120514..26561df24249 100644 --- a/examples/pytorch/summarization/README.md +++ b/examples/pytorch/summarization/README.md @@ -50,6 +50,7 @@ python run_summarization.py \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -71,6 +72,7 @@ python run_summarization.py \ --validation_file path_to_csv_or_jsonlines_file \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ + --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 2ab7ff3d0759..dd7dd083b49a 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -374,6 +374,21 @@ def main(): "`--source_prefix 'summarize: ' `" ) + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -683,6 +698,8 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/test_pytorch_examples.py b/examples/pytorch/test_pytorch_examples.py index 8bfadac779d3..d27cc305d6ac 100644 --- a/examples/pytorch/test_pytorch_examples.py +++ b/examples/pytorch/test_pytorch_examples.py @@ -104,6 +104,7 @@ def test_run_glue(self): run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} + --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train @@ -139,6 +140,7 @@ def test_run_clm(self): --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} + --overwrite_output_dir """.split() if backend_device_count(torch_device) > 1: @@ -186,6 +188,7 @@ def test_run_mlm(self): --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} + --overwrite_output_dir --do_train --do_eval --prediction_loss_only @@ -211,6 +214,7 @@ def test_run_ner(self): --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --do_train --do_eval --warmup_steps=2 @@ -239,6 +243,7 @@ def test_run_squad(self): --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train @@ -266,6 +271,7 @@ def test_run_squad_seq2seq(self): --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train @@ -290,6 +296,7 @@ def test_run_swag(self): --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --max_steps=20 --warmup_steps=2 --do_train @@ -327,6 +334,7 @@ def test_run_summarization(self): --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train @@ -356,6 +364,7 @@ def test_run_translation(self): --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} + --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train @@ -387,6 +396,7 @@ def test_run_image_classification(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False + --overwrite_output_dir True --dataloader_num_workers 16 --metric_for_best_model accuracy --max_steps 10 @@ -419,6 +429,7 @@ def test_run_speech_recognition_ctc(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False + --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --seed 42 @@ -448,6 +459,7 @@ def test_run_speech_recognition_ctc_adapter(self): --per_device_train_batch_size 2 
--per_device_eval_batch_size 1 --remove_unused_columns False + --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --target_language tur @@ -479,6 +491,7 @@ def test_run_speech_recognition_seq2seq(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 4 --remove_unused_columns False + --overwrite_output_dir True --preprocessing_num_workers 16 --max_steps 10 --seed 42 @@ -510,6 +523,7 @@ def test_run_audio_classification(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False + --overwrite_output_dir True --num_train_epochs 10 --max_steps 50 --seed 42 @@ -558,6 +572,7 @@ def test_run_vit_mae_pretraining(self): --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --remove_unused_columns False + --overwrite_output_dir True --dataloader_num_workers 16 --metric_for_best_model accuracy --max_steps 10 @@ -582,6 +597,7 @@ def test_run_semantic_segmentation(self): --do_train --do_eval --remove_unused_columns False + --overwrite_output_dir True --max_steps 10 --learning_rate=2e-4 --per_device_train_batch_size=2 @@ -608,6 +624,7 @@ def test_run_object_detection(self): --do_train --do_eval --remove_unused_columns False + --overwrite_output_dir True --eval_do_concat_batches False --max_steps 10 --learning_rate=1e-6 diff --git a/examples/pytorch/text-classification/run_classification.py b/examples/pytorch/text-classification/run_classification.py index bf62b52e355c..35413cd7875b 100755 --- a/examples/pytorch/text-classification/run_classification.py +++ b/examples/pytorch/text-classification/run_classification.py @@ -321,6 +321,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -678,6 +693,8 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 101dff28e986..afa09d746041 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -266,6 +266,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. 
+ last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -551,6 +566,8 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 8a57d101e997..3027da5feae6 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -224,6 +224,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -397,6 +412,8 @@ def compute_metrics(p: EvalPrediction): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 19b8b59ac36a..7620d697c126 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -263,6 +263,21 @@ def main(): ) logger.info(f"Training/evaluation parameters {training_args}") + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." 
+ ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. set_seed(training_args.seed) @@ -572,6 +587,8 @@ def compute_metrics(p): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/examples/pytorch/translation/README.md b/examples/pytorch/translation/README.md index 2aab14e2e056..4659843c66a1 100644 --- a/examples/pytorch/translation/README.md +++ b/examples/pytorch/translation/README.md @@ -53,6 +53,7 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -73,6 +74,7 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -94,6 +96,7 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -115,6 +118,7 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` @@ -140,6 +144,7 @@ python run_translation.py \ --output_dir /tmp/tst-translation \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ + --overwrite_output_dir \ --predict_with_generate ``` diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index da3cb9bdc1ec..8e005e0d7323 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -322,6 +322,21 @@ def main(): "`--source_prefix 'translate English to German: ' `" ) + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Set seed before initializing model. 
set_seed(training_args.seed) @@ -602,6 +617,8 @@ def compute_metrics(eval_preds): checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py index c1e1e0d3ef07..c1d81972f8a9 100755 --- a/src/transformers/integrations/integration_utils.py +++ b/src/transformers/integrations/integration_utils.py @@ -1796,7 +1796,7 @@ def _log_model_checkpoint(self, source_directory: str, checkpoint: str): def on_init_end(self, args, state, control, **kwargs): self._volatile_checkpoints_dir = None - if self._log_checkpoints and args.save_total_limit is not None: + if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name if self._log_checkpoints == "best" and not args.load_best_model_at_end: diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 55b7c4fc2a29..e69696bd825a 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -220,6 +220,9 @@ class TrainingArguments: Parameters: output_dir (`str`, *optional*, defaults to `"trainer_output"`): The output directory where the model predictions and checkpoints will be written. + overwrite_output_dir (`bool`, *optional*, defaults to `False`): + If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` + points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example @@ -807,7 +810,8 @@ class TrainingArguments: default=False, metadata={ "help": ( - "This argument is deprecated and will be removed in v5." + "Overwrite the content of the output directory. " + "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py index 87ddf02ec341..8ada67913b03 100755 --- a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -219,6 +219,21 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -437,12 +452,16 @@ def tokenize_function(examples): # Training if training_args.do_train: {%- if cookiecutter.can_train_from_scratch == "False" %} - if os.path.isdir(model_args.model_name_or_path): + if last_checkpoint is not None: + checkpoint = last_checkpoint + elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None {%- elif cookiecutter.can_train_from_scratch == "True" %} - if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): + if last_checkpoint is not None: + checkpoint = last_checkpoint + elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py index bf3aba7e1a4d..99b1450a0d59 100644 --- a/tests/deepspeed/test_deepspeed.py +++ b/tests/deepspeed/test_deepspeed.py @@ -1303,6 +1303,7 @@ def run_trainer( --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --output_dir {output_dir} + --overwrite_output_dir --max_source_length {max_len} --max_target_length {max_len} --val_max_target_length {max_len} @@ -1372,6 +1373,7 @@ def test_clm(self, stage, dtype): --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} + --overwrite_output_dir --do_train --do_eval --max_train_samples 16 @@ -1408,6 +1410,7 @@ def test_clm_from_config_zero3_fp16(self): --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} + --overwrite_output_dir --do_train --max_train_samples 4 --per_device_train_batch_size 2 diff --git a/tests/deepspeed/test_model_zoo.py b/tests/deepspeed/test_model_zoo.py index a7201ef7f9a0..2195bee01ccf 100644 --- a/tests/deepspeed/test_model_zoo.py +++ b/tests/deepspeed/test_model_zoo.py @@ -161,6 +161,7 @@ def make_task_cmds(): --num_train_epochs 1 --fp16 --report_to none + --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) diff --git a/tests/extended/test_trainer_ext.py b/tests/extended/test_trainer_ext.py index 80e71691bbbd..1789f9f6c98b 100644 --- a/tests/extended/test_trainer_ext.py +++ b/tests/extended/test_trainer_ext.py @@ -267,6 +267,7 @@ def run_trainer( --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} + --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} diff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py index 8764e21e6713..6a4060b0a731 100644 --- a/tests/fsdp/test_fsdp.py +++ b/tests/fsdp/test_fsdp.py @@ -439,6 +439,7 @@ def get_base_args(self, output_dir, num_epochs, logging_steps): --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir {output_dir} + --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 diff --git a/tests/sagemaker/conftest.py b/tests/sagemaker/conftest.py index 879cfff1c5ea..5daf3c4147f9 100644 --- a/tests/sagemaker/conftest.py +++ b/tests/sagemaker/conftest.py @@ -21,6 +21,7 @@ class SageMakerTestEnvironment: "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", + 
"overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index f5a259792518..525b63f1bc88 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -198,6 +198,21 @@ def main(): else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -444,7 +459,9 @@ def compute_metrics(p: EvalPrediction): # Training if training_args.do_train: checkpoint = None - if os.path.isdir(model_args.model_name_or_path): + if last_checkpoint is not None: + checkpoint = last_checkpoint + elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index cd4ce82e642f..266a874b64b5 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -4459,6 +4459,7 @@ def test_end_to_end_example(self): "1", "--output_dir", tmpdir, + "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", From 3acbdb9753d28f270f369c2adfb60467172aeac1 Mon Sep 17 00:00:00 2001 From: Marc Sun Date: Mon, 6 Oct 2025 09:45:24 +0000 Subject: [PATCH 8/8] update version --- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index e69696bd825a..46b38b26a977 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1728,7 +1728,7 @@ def __post_init__(self): self.report_to = [self.report_to] if self.warmup_ratio is not None: - logger.warning("warmup_ratio is deprecated and will be removed in v5. Use `warmup_steps` instead.") + logger.warning("warmup_ratio is deprecated and will be removed in v5.2. Use `warmup_steps` instead.") self.warmup_steps = self.warmup_ratio if self.warmup_steps < 0: