Commit 1ceac85e in chenpangpang/transformers
authored Nov 04, 2018 by thomwolf

add gradient accumulation

parent 6b0da96b
Showing 2 changed files with 29 additions and 17 deletions (+29 -17)
run_classifier.py  +16 -9
run_squad.py       +13 -8
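Both files switch from stepping the optimizer on every batch to stepping it once every `accumulate_gradients` batches. Below is a minimal sketch of that pattern; `model`, `optimizer`, `train_dataloader`, and the loss function are placeholders standing in for the scripts' own objects, not code taken from this commit.

import torch

def train_one_epoch(model, optimizer, train_dataloader, device, accumulate_gradients=1):
    # Sketch of the update schedule added by this commit: gradients are
    # accumulated across `accumulate_gradients` small batches, then applied once.
    model.train()
    model.zero_grad()
    for step, (inputs, labels) in enumerate(train_dataloader):
        inputs, labels = inputs.to(device), labels.to(device)
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        loss.backward()                        # gradients add up in .grad across steps
        if (step + 1) % accumulate_gradients == 0:
            optimizer.step()                   # one update per N small batches
            model.zero_grad()                  # start the next accumulation window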
run_classifier.py
...
@@ -426,7 +426,7 @@ def main():
     parser.add_argument("--accumulate_gradients",
                         type=int,
                         default=1,
-                        help="Number of steps to accumulate gradient on (divide the single step batch_size)")
+                        help="Number of steps to accumulate gradient on (divide the batch_size and accumulate)")
     parser.add_argument("--local_rank",
                         type=int,
                         default=-1,
...
@@ -452,10 +452,17 @@ def main():
         # print("Initializing the distributed backend: NCCL")
     print("device", device, "n_gpu", n_gpu)
+    if args.accumulate_gradients < 1:
+        raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
+            args.accumulate_gradients))
+
+    args.batch_size = args.batch_size / args.accumulate_gradients
+
     random.seed(args.seed)
     np.random.seed(args.seed)
     torch.manual_seed(args.seed)
     if n_gpu > 0:
         torch.cuda.manual_seed_all(args.seed)
 
     if not args.do_train and not args.do_eval:
         raise ValueError("At least one of `do_train` or `do_eval` must be True.")
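The division of args.batch_size above keeps the effective batch per optimizer update unchanged; only the per-step batch shrinks. Illustrative numbers (not from the diff; the diff uses plain /, so an integer per-step batch assumes accumulate_gradients divides the batch size evenly):

batch_size = 32               # batch requested on the command line (example value)
accumulate_gradients = 4      # example value for the new flag
per_step_batch = batch_size // accumulate_gradients       # 8 examples per forward/backward
effective_batch = per_step_batch * accumulate_gradients   # still 32 examples per optimizer.step()
assert (per_step_batch, effective_batch) == (8, 32)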
...
@@ -531,11 +538,10 @@ def main():
     train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
 
     model.train()
     for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
         tr_loss = 0
         nb_tr_examples, nb_tr_steps = 0, 0
-        for input_ids, input_mask, segment_ids, label_ids in tqdm(train_dataloader, desc="Iteration"):
+        for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(tqdm(train_dataloader, desc="Iteration")):
             input_ids = input_ids.to(device)
             input_mask = input_mask.float().to(device)
             segment_ids = segment_ids.to(device)
...
@@ -546,12 +552,13 @@ def main():
             loss = loss.mean() # mean() to average on multi-gpu.
             tr_loss += loss.item()
             nb_tr_examples += input_ids.size(0)
             nb_tr_steps += 1
-            model.zero_grad()
             loss.backward()
-            optimizer.step()
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                optimizer.step()    # We have accumulated enought gradients
+                model.zero_grad()
             global_step += 1
 
         if args.do_eval:
             eval_examples = processor.get_dev_examples(args.data_dir)
...
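A small check of the schedule implied by the new (step + 1) % ... == 0 test; note the loop reads args.gradient_accumulation_steps while the flag registered above is --accumulate_gradients, so the snippet below just uses a local N as a stand-in.

N = 4  # stand-in for the accumulation step count
update_steps = [step for step in range(12) if (step + 1) % N == 0]
print(update_steps)  # [3, 7, 11] -> optimizer.step() fires on every N-th iteration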
run_squad.py
...
@@ -731,6 +731,10 @@ def main():
                         type=int,
                         default=-1,
                         help="local_rank for distributed training on gpus")
+    parser.add_argument("--accumulate_gradients",
+                        type=int,
+                        default=1,
+                        help="Number of steps to accumulate gradient on (divide the batch_size and accumulate)")
     parser.add_argument('--seed',
                         type=int,
                         default=42,
...
@@ -836,8 +840,8 @@ def main():
     model.train()
     for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
-        for input_ids, input_mask, segment_ids, start_positions, end_positions in tqdm(train_dataloader, desc="Iteration"):
+        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
+            input_ids, input_mask, segment_ids, start_positions, end_positions = batch
             input_ids = input_ids.to(device)
             input_mask = input_mask.float().to(device)
             segment_ids = segment_ids.to(device)
...
@@ -851,9 +855,10 @@ def main():
             if n_gpu > 1:
                 loss = loss.mean() # mean() to average on multi-gpu.
-            model.zero_grad()
             loss.backward()
-            optimizer.step()
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                optimizer.step()    # We have accumulated enought gradients
+                model.zero_grad()
             global_step += 1
 
         if args.do_predict:
...
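Both training loops rely on the same PyTorch behaviour: successive backward() calls add into .grad until the gradients are cleared, which is why zero_grad() now sits next to optimizer.step() inside the every-N-steps branch. A standalone check of that behaviour (not from the repository):

import torch

w = torch.ones(3, requires_grad=True)
x1 = torch.tensor([1.0, 2.0, 3.0])
x2 = torch.tensor([4.0, 5.0, 6.0])

(w * x1).sum().backward()   # d/dw = x1
(w * x2).sum().backward()   # d/dw = x2, added on top of the previous gradient
print(w.grad)               # tensor([5., 7., 9.]) == x1 + x2

w.grad.zero_()              # the role model.zero_grad() plays after each optimizer.step()

As written in the diffs, the accumulated gradient is the sum of the per-step gradients, since the loss is not divided by the number of accumulation steps.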