chenpangpang/transformers · Commit a81a1ef8

authored Nov 10, 2018 by thomwolf

fixing learning rate schedule when using gradient_accumulation_steps

parent ea85cca8

Showing 2 changed files with 18 additions and 2 deletions:
run_classifier.py (+1 / -1)
run_squad.py (+17 / -1)
run_classifier.py
@@ -464,7 +464,7 @@ def main():
     if args.do_train:
         train_examples = processor.get_train_examples(args.data_dir)
         num_train_steps = int(
-            len(train_examples) / args.train_batch_size * args.num_train_epochs)
+            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

     model = BertForSequenceClassification(bert_config, len(label_list))
     if args.init_checkpoint is not None:
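To see why the extra division matters: with gradient accumulation, the optimizer (and hence the warmup/decay schedule) steps only once every gradient_accumulation_steps mini-batches, so the schedule must be sized in optimizer steps rather than mini-batches. A minimal sketch with hypothetical sizes (none of these numbers come from the commit):

    # Hypothetical sizes, for illustration only.
    num_examples = 10_000
    train_batch_size = 32
    gradient_accumulation_steps = 4
    num_train_epochs = 3

    # Before the fix: counts mini-batches, overestimating the schedule length 4x.
    steps_before = int(num_examples / train_batch_size * num_train_epochs)

    # After the fix: counts actual optimizer steps.
    steps_after = int(num_examples / train_batch_size
                      / gradient_accumulation_steps * num_train_epochs)

    print(steps_before, steps_after)  # 937 234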
run_squad.py
@@ -742,6 +742,10 @@ def main():
                         default=False,
                         action='store_true',
                         help="Whether to perform optimization and keep the optimizer averages on CPU")
+    parser.add_argument('--fp16',
+                        default=False,
+                        action='store_true',
+                        help="Whether to use 16-bit float precision instead of 32-bit")
     args = parser.parse_args()
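For context, a self-contained sketch of the store_true flag pattern the new --fp16 option uses: the attribute defaults to False and flips to True only when the flag is present (the simulated argv below is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    # Same pattern as the new --fp16 option: absent -> False, present -> True.
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")

    args = parser.parse_args(['--fp16'])  # simulate passing the flag
    print(args.fp16)  # True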
@@ -801,11 +805,13 @@ def main():
train_examples
=
read_squad_examples
(
input_file
=
args
.
train_file
,
is_training
=
True
)
num_train_steps
=
int
(
len
(
train_examples
)
/
args
.
train_batch_size
*
args
.
num_train_epochs
)
len
(
train_examples
)
/
args
.
train_batch_size
/
args
.
gradient_accumulation_steps
*
args
.
num_train_epochs
)
model
=
BertForQuestionAnswering
(
bert_config
)
if
args
.
init_checkpoint
is
not
None
:
model
.
bert
.
load_state_dict
(
torch
.
load
(
args
.
init_checkpoint
,
map_location
=
'cpu'
))
if
args
.
fp16
:
model
.
half
()
if
not
args
.
optimize_on_cpu
:
model
.
to
(
device
)
...
...
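model.half() converts all of a module's parameters and buffers to torch.float16 in place. A minimal sketch with a toy module standing in for BertForQuestionAnswering (the toy model is an assumption, not part of the commit):

    import torch
    from torch import nn

    model = nn.Linear(4, 2)  # toy stand-in for the BERT model
    model.half()             # casts parameters and buffers to float16 in place

    print(next(model.parameters()).dtype)  # torch.float16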
@@ -847,6 +853,12 @@ def main():
         all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
         all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
+        if args.fp16:
+            (all_input_ids, all_input_mask, all_segment_ids,
+             all_start_positions, all_end_positions) = tuple(
+                 t.half() for t in
+                 (all_input_ids, all_input_mask, all_segment_ids,
+                  all_start_positions, all_end_positions))
         train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                    all_start_positions, all_end_positions)
         if args.local_rank == -1:
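The cast above converts several tensors to float16 in a single generator expression. A self-contained sketch of the same pattern (the sample tensors are hypothetical; note that .half() on an integer tensor such as token ids yields float16 values, which is what the commit does here):

    import torch

    a = torch.tensor([[1, 2, 3]], dtype=torch.long)  # e.g. token ids
    b = torch.zeros(1, 3)                            # e.g. an input mask

    # Cast several tensors at once, as in the hunk above.
    a, b = tuple(t.half() for t in (a, b))
    print(a.dtype, b.dtype)  # torch.float16 torch.float16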
@@ -895,6 +907,10 @@ def main():
         all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
         all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
         all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
+        if args.fp16:
+            (all_input_ids, all_input_mask, all_segment_ids,
+             all_example_index) = tuple(t.half() for t in
+                 (all_input_ids, all_input_mask, all_segment_ids, all_example_index))
         eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
         if args.local_rank == -1: