From 16cfb01db92def357fc1cc39cc64d7ccdb85ab97 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Thu, 18 Apr 2024 13:06:19 -0400 Subject: [PATCH] Deprecate `evaluation_strategy` in favor of `eval_strategy` --- docs/source/accelerate/deepspeed.md | 4 ++-- docs/source/accelerate/fsdp.md | 4 ++-- docs/source/quicktour.md | 2 +- docs/source/task_guides/lora_based_methods.md | 2 +- .../peft_prompt_tuning_seq2seq_with_generate.ipynb | 2 +- .../image_classification/image_classification_peft_lora.ipynb | 2 +- examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb | 2 +- .../int8_training/peft_bnb_whisper_large_v2_training.ipynb | 2 +- examples/poly/peft_poly_seq2seq_with_generate.ipynb | 2 +- .../semantic_segmentation_peft_lora.ipynb | 2 +- examples/sft/run_peft.sh | 2 +- examples/sft/run_peft_deepspeed.sh | 2 +- examples/sft/run_peft_fsdp.sh | 2 +- examples/sft/run_peft_multigpu.sh | 2 +- examples/sft/run_peft_qlora_deepspeed_stage3.sh | 2 +- examples/sft/run_peft_qlora_fsdp.sh | 2 +- examples/sft/run_unsloth_peft.sh | 2 +- 17 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/source/accelerate/deepspeed.md b/docs/source/accelerate/deepspeed.md index d3d6aeb714..e377870533 100644 --- a/docs/source/accelerate/deepspeed.md +++ b/docs/source/accelerate/deepspeed.md @@ -96,7 +96,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ @@ -219,7 +219,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.
--logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/docs/source/accelerate/fsdp.md b/docs/source/accelerate/fsdp.md index c862c4060d..0df79b0229 100644 --- a/docs/source/accelerate/fsdp.md +++ b/docs/source/accelerate/fsdp.md @@ -74,7 +74,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ @@ -218,7 +218,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index d7dae7b7ad..4a13b6bc57 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -76,7 +76,7 @@ training_args = TrainingArguments( per_device_eval_batch_size=32, num_train_epochs=2, weight_decay=0.01, - evaluation_strategy="epoch", + eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, ) diff --git a/docs/source/task_guides/lora_based_methods.md b/docs/source/task_guides/lora_based_methods.md index 87b244ad3b..de400c5965 100644 --- a/docs/source/task_guides/lora_based_methods.md +++ b/docs/source/task_guides/lora_based_methods.md @@ -257,7 +257,7 @@ batch_size = 128 args = TrainingArguments( peft_model_id, remove_unused_columns=False, - evaluation_strategy="epoch", + eval_strategy="epoch", save_strategy="epoch", learning_rate=5e-3, per_device_train_batch_size=batch_size, diff --git a/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb 
b/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb index a6556c1d6b..92ef4bfc76 100644 --- a/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb +++ b/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb @@ -558,7 +558,7 @@ " per_device_train_batch_size=batch_size,\n", " learning_rate=lr,\n", " num_train_epochs=num_epochs,\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " logging_strategy=\"epoch\",\n", " save_strategy=\"no\",\n", " report_to=[],\n", diff --git a/examples/image_classification/image_classification_peft_lora.ipynb b/examples/image_classification/image_classification_peft_lora.ipynb index 8c6b58e5ed..dae914672b 100644 --- a/examples/image_classification/image_classification_peft_lora.ipynb +++ b/examples/image_classification/image_classification_peft_lora.ipynb @@ -1008,7 +1008,7 @@ "args = TrainingArguments(\n", " f\"{model_name}-finetuned-lora-food101\",\n", " remove_unused_columns=False,\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " save_strategy=\"epoch\",\n", " learning_rate=5e-3,\n", " per_device_train_batch_size=batch_size,\n", diff --git a/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb b/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb index e7140ba6bd..43c562ed79 100644 --- a/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb +++ b/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb @@ -819,7 +819,7 @@ "\n", "training_args = TrainingArguments(\n", " \"temp\",\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " learning_rate=1e-3,\n", " gradient_accumulation_steps=1,\n", " auto_find_batch_size=True,\n", diff --git a/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb b/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb index 9f1e6688de..aa7c35a161 100644 --- 
a/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb +++ b/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb @@ -1246,7 +1246,7 @@ " learning_rate=1e-3,\n", " warmup_steps=50,\n", " num_train_epochs=3,\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " fp16=True,\n", " per_device_eval_batch_size=8,\n", " generation_max_length=128,\n", diff --git a/examples/poly/peft_poly_seq2seq_with_generate.ipynb b/examples/poly/peft_poly_seq2seq_with_generate.ipynb index d81debf1d4..c10c06f85d 100644 --- a/examples/poly/peft_poly_seq2seq_with_generate.ipynb +++ b/examples/poly/peft_poly_seq2seq_with_generate.ipynb @@ -973,7 +973,7 @@ " per_device_eval_batch_size=batch_size,\n", " learning_rate=lr,\n", " num_train_epochs=num_epochs,\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " logging_strategy=\"epoch\",\n", " save_strategy=\"no\",\n", " report_to=[],\n", diff --git a/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb b/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb index 83fa2fc41e..34c8a25675 100644 --- a/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb +++ b/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb @@ -587,7 +587,7 @@ " per_device_train_batch_size=4,\n", " per_device_eval_batch_size=2,\n", " save_total_limit=3,\n", - " evaluation_strategy=\"epoch\",\n", + " eval_strategy=\"epoch\",\n", " save_strategy=\"epoch\",\n", " logging_steps=5,\n", " remove_unused_columns=False,\n", diff --git a/examples/sft/run_peft.sh b/examples/sft/run_peft.sh index ca51d59864..8aa48648d3 100644 --- a/examples/sft/run_peft.sh +++ b/examples/sft/run_peft.sh @@ -11,7 +11,7 @@ python train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git 
a/examples/sft/run_peft_deepspeed.sh b/examples/sft/run_peft_deepspeed.sh index 4fc10c22be..95dbf08892 100644 --- a/examples/sft/run_peft_deepspeed.sh +++ b/examples/sft/run_peft_deepspeed.sh @@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/examples/sft/run_peft_fsdp.sh b/examples/sft/run_peft_fsdp.sh index c4717031a5..63dd475f44 100644 --- a/examples/sft/run_peft_fsdp.sh +++ b/examples/sft/run_peft_fsdp.sh @@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/examples/sft/run_peft_multigpu.sh b/examples/sft/run_peft_multigpu.sh index 9587b19a7b..46a8aa2f33 100644 --- a/examples/sft/run_peft_multigpu.sh +++ b/examples/sft/run_peft_multigpu.sh @@ -11,7 +11,7 @@ torchrun --nproc_per_node 8 --nnodes 1 train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/examples/sft/run_peft_qlora_deepspeed_stage3.sh b/examples/sft/run_peft_qlora_deepspeed_stage3.sh index a414437cc8..4bbc1bbcc4 100644 --- a/examples/sft/run_peft_qlora_deepspeed_stage3.sh +++ b/examples/sft/run_peft_qlora_deepspeed_stage3.sh @@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train. 
--logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/examples/sft/run_peft_qlora_fsdp.sh b/examples/sft/run_peft_qlora_fsdp.sh index a0b6570b7a..4ed3218c82 100644 --- a/examples/sft/run_peft_qlora_fsdp.sh +++ b/examples/sft/run_peft_qlora_fsdp.sh @@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ diff --git a/examples/sft/run_unsloth_peft.sh b/examples/sft/run_unsloth_peft.sh index 2516b62ad6..97a4a6b520 100644 --- a/examples/sft/run_unsloth_peft.sh +++ b/examples/sft/run_unsloth_peft.sh @@ -11,7 +11,7 @@ python train.py \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ ---evaluation_strategy "epoch" \ +--eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \