Unverified Commit 60d5f8f9 authored by Zach Mueller, committed by GitHub

🚨🚨🚨Deprecate `evaluation_strategy` to `eval_strategy`🚨🚨🚨 (#30190)

* Alias

* Note alias

* Tests and src

* Rest

* Clean

* Change typing?

* Fix tests

* Deprecation versions
parent c86d020e
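
The bullets above mention adding an alias plus deprecation versions. A minimal sketch of that alias-and-warn pattern (an illustration only, not the PR's actual `TrainingArguments` code):

```py
import warnings
from dataclasses import dataclass
from typing import Optional

@dataclass
class Args:
    # Hypothetical reduced example of the pattern the commit message
    # describes; not the real TrainingArguments class.
    eval_strategy: str = "no"
    evaluation_strategy: Optional[str] = None  # deprecated alias

    def __post_init__(self):
        if self.evaluation_strategy is not None:
            warnings.warn(
                "`evaluation_strategy` is deprecated; use `eval_strategy` instead",
                FutureWarning,
            )
            # A value passed under the old name still takes effect.
            self.eval_strategy = self.evaluation_strategy
```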
@@ -231,7 +231,7 @@ pip install transformers datasets evaluate
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_eli5_mlm_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     num_train_epochs=3,
 ...     weight_decay=0.01,
@@ -266,7 +266,7 @@ tokenized_swag = swag.map(preprocess_function, batched=True)
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_swag_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     load_best_model_at_end=True,
 ...     learning_rate=5e-5,
@@ -220,7 +220,7 @@ pip install transformers datasets evaluate
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_qa_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     per_device_train_batch_size=16,
 ...     per_device_eval_batch_size=16,
@@ -323,7 +323,7 @@ pip install -q datasets transformers evaluate
 ...     per_device_train_batch_size=2,
 ...     per_device_eval_batch_size=2,
 ...     save_total_limit=3,
-...     evaluation_strategy="steps",
+...     eval_strategy="steps",
 ...     save_strategy="steps",
 ...     save_steps=20,
 ...     eval_steps=20,
@@ -324,7 +324,7 @@ pip install -q datasets transformers evaluate
 ...     per_device_train_batch_size=2,
 ...     per_device_eval_batch_size=2,
 ...     save_total_limit=3,
-...     evaluation_strategy="steps",
+...     eval_strategy="steps",
 ...     save_strategy="steps",
 ...     save_steps=20,
 ...     eval_steps=20,
@@ -204,7 +204,7 @@ pip install transformers datasets evaluate rouge_score
 ```py
 >>> training_args = Seq2SeqTrainingArguments(
 ...     output_dir="my_awesome_billsum_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     per_device_train_batch_size=16,
 ...     per_device_eval_batch_size=16,
@@ -477,7 +477,7 @@ In SpeechT5, the input to the model's decoder is reduced to half
 ...     max_steps=4000,
 ...     gradient_checkpointing=True,
 ...     fp16=True,
-...     evaluation_strategy="steps",
+...     eval_strategy="steps",
 ...     per_device_eval_batch_size=2,
 ...     save_steps=1000,
 ...     eval_steps=1000,
@@ -288,7 +288,7 @@ pip install transformers datasets evaluate seqeval
 ...     per_device_eval_batch_size=16,
 ...     num_train_epochs=2,
 ...     weight_decay=0.01,
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     load_best_model_at_end=True,
 ...     push_to_hub=True,
@@ -208,7 +208,7 @@ pip install transformers datasets evaluate sacrebleu
 ```py
 >>> training_args = Seq2SeqTrainingArguments(
 ...     output_dir="my_awesome_opus_books_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     per_device_train_batch_size=16,
 ...     per_device_eval_batch_size=16,
@@ -360,7 +360,7 @@ You should probably TRAIN this model on a down-stream task to be able to use it
 >>> args = TrainingArguments(
 ...     new_model_name,
 ...     remove_unused_columns=False,
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     learning_rate=5e-5,
 ...     per_device_train_batch_size=batch_size,
@@ -135,12 +135,12 @@ The pretrained head of the BERT model is discarded and replaced with a randomly
 ...     return metric.compute(predictions=predictions, references=labels)
 ```

-If you want to monitor evaluation metrics during fine-tuning, specify the `evaluation_strategy` parameter in your training arguments to report the evaluation metric at the end of each epoch:
+If you want to monitor evaluation metrics during fine-tuning, specify the `eval_strategy` parameter in your training arguments to report the evaluation metric at the end of each epoch:

 ```python
 >>> from transformers import TrainingArguments, Trainer

->>> training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")
+>>> training_args = TrainingArguments(output_dir="test_trainer", eval_strategy="epoch")
 ```

 ### Trainer
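For downstream code that must run on transformers releases from both before and after this rename, a version-gated fallback like the sketch below avoids the FutureWarning on newer releases while staying compatible with older ones. The `4.41.0` bound is an assumption about when the rename ships; verify it against the release notes:

```py
from packaging import version

import transformers
from transformers import TrainingArguments

# Assumed cutover version for the rename; check the release notes.
arg = (
    "eval_strategy"
    if version.parse(transformers.__version__) >= version.parse("4.41.0")
    else "evaluation_strategy"
)
training_args = TrainingArguments(output_dir="test_trainer", **{arg: "epoch"})
```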
@@ -132,7 +132,7 @@ Tue Jan 11 08:58:05 2022
 ```py
 default_args = {
     "output_dir": "tmp",
-    "evaluation_strategy": "steps",
+    "eval_strategy": "steps",
     "num_train_epochs": 1,
     "log_level": "error",
     "report_to": "none",
@@ -274,7 +274,7 @@ Since the sampling rate of the MInDS-14 dataset is 8000kHz ([data
 ...     gradient_checkpointing=True,
 ...     fp16=True,
 ...     group_by_length=True,
-...     evaluation_strategy="steps",
+...     eval_strategy="steps",
 ...     per_device_eval_batch_size=8,
 ...     save_steps=1000,
 ...     eval_steps=1000,
@@ -221,7 +221,7 @@ Since the sampling rate of the MinDS-14 dataset is 8000khz (this information
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_mind_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     learning_rate=3e-5,
 ...     per_device_train_batch_size=32,
@@ -385,7 +385,7 @@ end_index 18
 ...     num_train_epochs=20,
 ...     save_steps=200,
 ...     logging_steps=50,
-...     evaluation_strategy="steps",
+...     eval_strategy="steps",
 ...     learning_rate=5e-5,
 ...     save_total_limit=2,
 ...     remove_unused_columns=False,
@@ -201,7 +201,7 @@ training_args = TrainingArguments(
     per_device_eval_batch_size=32,
     gradient_accumulation_steps=2,
     save_total_limit=3,
-    evaluation_strategy="steps",
+    eval_strategy="steps",
     eval_steps=50,
     save_strategy="steps",
     save_steps=50,
@@ -301,7 +301,7 @@ food["test"].set_transform(preprocess_val)
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_food_model",
 ...     remove_unused_columns=False,
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     learning_rate=5e-5,
 ...     per_device_train_batch_size=16,
@@ -233,7 +233,7 @@ pip install transformers datasets evaluate
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_eli5_clm-model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     weight_decay=0.01,
 ...     push_to_hub=True,
@@ -236,7 +236,7 @@ Log in to your Hugging Face account to upload the model and share it with the community
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_eli5_mlm_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     learning_rate=2e-5,
 ...     num_train_epochs=3,
 ...     weight_decay=0.01,
@@ -265,7 +265,7 @@ tokenized_swag = swag.map(preprocess_function, batched=True)
 ```py
 >>> training_args = TrainingArguments(
 ...     output_dir="my_awesome_swag_model",
-...     evaluation_strategy="epoch",
+...     eval_strategy="epoch",
 ...     save_strategy="epoch",
 ...     load_best_model_at_end=True,
 ...     learning_rate=5e-5,
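Projects with many scripts to migrate can apply the same mechanical rename this commit applies to the docs. A rough sketch (review the resulting diff before committing, since this also rewrites the string inside comments and messages):

```py
import pathlib

# Hypothetical one-shot migration over a project's Python sources.
for path in pathlib.Path(".").rglob("*.py"):
    text = path.read_text()
    if "evaluation_strategy" in text:
        path.write_text(text.replace("evaluation_strategy", "eval_strategy"))
```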