chenpangpang / transformers · Commits
"vscode:/vscode.git/clone" did not exist on "283c70707999a9124cacac939ac1dc05183f471c"
Unverified commit 379005c9, authored Dec 01, 2020 by Stas Bekman, committed by GitHub on Dec 01, 2020

start using training_args.parallel_mode (#8882)

Parent: b08843cf
Showing 2 changed files, with 4 additions and 2 deletions:

    examples/seq2seq/finetune_trainer.py    +2 -1
    examples/seq2seq/seq2seq_trainer.py     +2 -1
examples/seq2seq/finetune_trainer.py

@@ -11,6 +11,7 @@ from seq2seq_trainer import Seq2SeqTrainer
 from seq2seq_training_args import Seq2SeqTrainingArguments
 from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, set_seed
 from transformers.trainer_utils import EvaluationStrategy, is_main_process
+from transformers.training_args import ParallelMode
 from utils import (
     Seq2SeqDataCollator,
     Seq2SeqDataset,
...
@@ -132,7 +133,7 @@ def main():
         training_args.local_rank,
         training_args.device,
         training_args.n_gpu,
-        bool(training_args.local_rank != -1),
+        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
         training_args.fp16,
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
...
examples/seq2seq/seq2seq_trainer.py

@@ -18,6 +18,7 @@ from transformers.optimization import (
     get_polynomial_decay_schedule_with_warmup,
 )
 from transformers.trainer_pt_utils import get_tpu_sampler
+from transformers.training_args import ParallelMode


 logger = logging.get_logger(__name__)
...
@@ -123,7 +124,7 @@ class Seq2SeqTrainer(Trainer):
         if self.args.sortish_sampler:
             self.train_dataset.make_sortish_sampler(
                 self.args.per_device_train_batch_size,
-                distributed=(self.args.local_rank != -1),
+                distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
             )

         return (
...
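Both hunks replace the raw "local_rank != -1" test with the ParallelMode enum exposed by TrainingArguments. A minimal sketch of the idiom the commit adopts is below; the TrainingArguments instance and its output_dir are placeholders for illustration, not part of the diff.

from transformers import TrainingArguments
from transformers.training_args import ParallelMode

# Placeholder arguments; in the examples above these come from HfArgumentParser.
training_args = TrainingArguments(output_dir="output")

# Old idiom: infer distributed training from the local_rank sentinel
# (-1 means the process was not launched by torch.distributed).
was_distributed = training_args.local_rank != -1

# New idiom: ask the training arguments for their resolved parallel mode.
is_distributed = training_args.parallel_mode == ParallelMode.DISTRIBUTED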