chenpangpang / transformers · Commits

Commit 1652ddad (unverified)
[seq2seq testing] improve readability (#7845)

Authored Oct 16, 2020 by Stas Bekman; committed by GitHub on Oct 16, 2020
Parent: 466115b2

Changes: 1 changed file with 32 additions and 52 deletions

examples/seq2seq/test_finetune_trainer.py (+32 -52)
@@ -47,58 +47,38 @@ def test_finetune_trainer_slow():
 def run_trainer(eval_steps: int, max_len: str, model_name: str, num_train_epochs: int):
     data_dir = "examples/seq2seq/test_data/wmt_en_ro"
     output_dir = tempfile.mkdtemp(prefix="test_output")
-    argv = [
-        "--model_name_or_path",
-        model_name,
-        "--data_dir",
-        data_dir,
-        "--output_dir",
-        output_dir,
-        "--overwrite_output_dir",
-        "--n_train",
-        "8",
-        "--n_val",
-        "8",
-        "--max_source_length",
-        max_len,
-        "--max_target_length",
-        max_len,
-        "--val_max_target_length",
-        max_len,
-        "--do_train",
-        "--do_eval",
-        "--do_predict",
-        "--num_train_epochs",
-        str(num_train_epochs),
-        "--per_device_train_batch_size",
-        "4",
-        "--per_device_eval_batch_size",
-        "4",
-        "--learning_rate",
-        "3e-4",
-        "--warmup_steps",
-        "8",
-        "--evaluate_during_training",
-        "--predict_with_generate",
-        "--logging_steps",
-        0,
-        "--save_steps",
-        str(eval_steps),
-        "--eval_steps",
-        str(eval_steps),
-        "--sortish_sampler",
-        "--label_smoothing",
-        "0.1",
-        # "--eval_beams",
-        # "2",
-        "--adafactor",
-        "--task",
-        "translation",
-        "--tgt_lang",
-        "ro_RO",
-        "--src_lang",
-        "en_XX",
-    ]
+    argv = f"""
+        --model_name_or_path {model_name}
+        --data_dir {data_dir}
+        --output_dir {output_dir}
+        --overwrite_output_dir
+        --n_train 8
+        --n_val 8
+        --max_source_length {max_len}
+        --max_target_length {max_len}
+        --val_max_target_length {max_len}
+        --do_train
+        --do_eval
+        --do_predict
+        --num_train_epochs {str(num_train_epochs)}
+        --per_device_train_batch_size 4
+        --per_device_eval_batch_size 4
+        --learning_rate 3e-4
+        --warmup_steps 8
+        --evaluate_during_training
+        --predict_with_generate
+        --logging_steps 0
+        --save_steps {str(eval_steps)}
+        --eval_steps {str(eval_steps)}
+        --sortish_sampler
+        --label_smoothing 0.1
+        --adafactor
+        --task translation
+        --tgt_lang ro_RO
+        --src_lang en_XX
+    """.split()
+    # --eval_beams 2
     testargs = ["finetune_trainer.py"] + argv
     with patch.object(sys, "argv", testargs):
         main()
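For context on the pattern the new version adopts: the argument list is written as a readable multi-line f-string, split on whitespace into an argv list, and sys.argv is temporarily replaced via unittest.mock's patch.object so the script's main() parses those arguments. The snippet below is a minimal, self-contained sketch of that pattern under assumed names; its main(), option names, and model id are hypothetical stand-ins, not the transformers finetune_trainer test itself.

import argparse
import sys
from unittest.mock import patch


def main():
    # Hypothetical script entry point that reads its options from sys.argv,
    # standing in for a real CLI script such as finetune_trainer.py.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path")
    parser.add_argument("--learning_rate", type=float)
    parser.add_argument("--do_train", action="store_true")
    args = parser.parse_args()
    print(args)


def test_main_via_patched_argv():
    model_name = "some/small-model"  # placeholder value, not a real checkpoint
    # Readable multi-line f-string; .split() turns it into a flat argv list.
    # Every value must be whitespace-free, since no shell-style quoting is applied.
    argv = f"""
        --model_name_or_path {model_name}
        --learning_rate 3e-4
        --do_train
    """.split()
    testargs = ["prog_name.py"] + argv
    # Temporarily swap sys.argv so main() sees the test arguments.
    with patch.object(sys, "argv", testargs):
        main()


if __name__ == "__main__":
    test_main_via_patched_argv()

One trade-off worth noting: because .split() does plain whitespace splitting, the f-string form only works while no argument value contains spaces, which holds for all of the flags in this test.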