chenpangpang / transformers · Commits

Commit 9626e045
Authored Nov 27, 2019 by Bilal Khan; committed by Lysandre Debut on Dec 09, 2019
Parent: 2d73591a

Add functionality to continue training from last saved global_step
Showing 1 changed file with 21 additions and 1 deletion: examples/run_lm_finetuning.py (+21, -1)
examples/run_lm_finetuning.py

@@ -223,17 +223,37 @@ def train(args, train_dataset, model, tokenizer):
     logger.info("  Total optimization steps = %d", t_total)
 
     global_step = 0
+    epochs_trained = 0
+    steps_trained_in_current_epoch = 0
+    # Check if continuing training from a checkpoint
+    if os.path.exists(args.model_name_or_path):
+        # set global_step to global_step of last saved checkpoint from model path
+        global_step = int(args.model_name_or_path.split('-')[-1].split('/')[0])
+        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
+        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
+        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
+        logger.info("  Continuing training from epoch %d", epochs_trained)
+        logger.info("  Continuing training from global step %d", global_step)
+        logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
+
     tr_loss, logging_loss = 0.0, 0.0
     model_to_resize = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
     model_to_resize.resize_token_embeddings(len(tokenizer))
 
     model.zero_grad()
-    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
+    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
     set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
     for epoch in train_iterator:
         epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
         for step, batch in enumerate(epoch_iterator):
+
+            # Skip past any already trained steps if resuming training
+            if steps_trained_in_current_epoch > 0:
+                steps_trained_in_current_epoch -= 1
+                continue
+
             inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
             inputs = inputs.to(args.device)
             labels = labels.to(args.device)
 
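The added block derives the resume point purely from the directory name passed as the script's existing --model_name_or_path argument: the text after the last '-' is read as the saved global_step, and the number of already completed epochs and the offset into the current epoch follow from integer division and modulo. A minimal, self-contained sketch of that arithmetic; the helper name resume_state, the path output/checkpoint-1500, and the example numbers are hypothetical and not part of the commit, but the expressions mirror the diff above:

    def resume_state(model_name_or_path, num_batches, gradient_accumulation_steps):
        # "output/checkpoint-1500" -> "1500"; the extra split('/') tolerates a
        # path that continues past the step suffix (e.g. a trailing "/").
        global_step = int(model_name_or_path.split('-')[-1].split('/')[0])
        # One optimizer step is taken every `gradient_accumulation_steps` batches,
        # so this is the number of optimizer steps contributed by one epoch.
        steps_per_epoch = num_batches // gradient_accumulation_steps
        epochs_trained = global_step // steps_per_epoch
        steps_trained_in_current_epoch = global_step % steps_per_epoch
        return global_step, epochs_trained, steps_trained_in_current_epoch

    # 1500 optimizer steps saved, 400 batches per epoch, accumulation of 2
    # -> 200 optimizer steps per epoch -> resume at epoch 7, 100 steps into it.
    print(resume_state("output/checkpoint-1500", num_batches=400, gradient_accumulation_steps=2))
    # (1500, 7, 100)

In practice, resuming would mean pointing --model_name_or_path at a previously saved checkpoint directory whose name ends in the step number (assuming the script's usual checkpoint-<global_step> naming); if that path exists locally, training picks up from the corresponding epoch and step instead of starting over.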
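The other half of the change fast-forwards the loops: trange(epochs_trained, int(args.num_train_epochs), ...) starts the epoch counter past the completed epochs, and the guard added at the top of the batch loop discards batches until the saved position is reached. A toy sketch of that guard, assuming a list of integers as a stand-in for train_dataloader (the names and numbers here are for illustration only):

    train_dataloader = list(range(10))      # stand-in for the real DataLoader: 10 batches
    steps_trained_in_current_epoch = 4      # as computed from the checkpoint

    processed = []
    for step, batch in enumerate(train_dataloader):
        # Skip past any already trained steps if resuming training
        if steps_trained_in_current_epoch > 0:
            steps_trained_in_current_epoch -= 1
            continue
        processed.append(batch)             # the real loop would run a training step here

    print(processed)                        # [4, 5, 6, 7, 8, 9]

Because the counter only decrements and bottoms out at zero during the resumed epoch, every later epoch iterates over the full dataloader again.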