ModelZoo / ResNet50_tensorflow / Commits / 184c5586

Commit 184c5586
Authored Aug 27, 2020 by Hongkun Yu
Committed by A. Unique TensorFlower on Aug 27, 2020

[Minor Cleanup] Move clip_by_global_norm_callback to model_training_utils

PiperOrigin-RevId: 328888268

Parent: 9ac54b65
Showing 4 changed files with 13 additions and 9 deletions (+13, -9)
official/nlp/bert/common_flags.py           +0  -7
official/nlp/bert/model_training_utils.py   +7  -0
official/nlp/bert/run_pretraining.py        +3  -1
official/nlp/bert/run_squad_helper.py       +3  -1
official/nlp/bert/common_flags.py (view file @ 184c5586)

@@ -121,10 +121,3 @@ def use_graph_rewrite():

 def get_loss_scale():
   return flags_core.get_loss_scale(flags.FLAGS, default_for_fp16='dynamic')
-
-
-def clip_by_global_norm_callback(grads_and_vars):
-  grads, variables = zip(*grads_and_vars)
-  (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
-  return zip(clipped_grads, variables)
official/nlp/bert/model_training_utils.py (view file @ 184c5586)

@@ -75,6 +75,13 @@ def _float_metric_value(metric):

   return metric.result().numpy().astype(float)


+def clip_by_global_norm_callback(grads_and_vars):
+  """Performs gradient clipping."""
+  grads, variables = zip(*grads_and_vars)
+  (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
+  return zip(clipped_grads, variables)
+
+
 def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
   """Calculates steps to run on device."""
   if steps_per_loop <= 0:
 ...
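For reference, a minimal standalone sketch of what the relocated callback does. The toy model, data, and optimizer below are purely illustrative and not part of this commit; only the callback body is taken from the hunk above.

import tensorflow as tf


def clip_by_global_norm_callback(grads_and_vars):
  """Performs gradient clipping (copied from the hunk above)."""
  grads, variables = zip(*grads_and_vars)
  (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
  return zip(clipped_grads, variables)


# Toy model and data, used only to produce some gradients.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
x = tf.random.normal([8, 4])
y = tf.random.normal([8, 1])

with tf.GradientTape() as tape:
  loss = tf.reduce_mean(tf.square(model(x) - y))
grads = tape.gradient(loss, model.trainable_variables)

# The callback takes (gradient, variable) pairs and returns new pairs whose
# gradients are rescaled so that their global norm is at most 1.0.
clipped = clip_by_global_norm_callback(zip(grads, model.trainable_variables))
optimizer.apply_gradients(list(clipped))

After this commit the function lives in model_training_utils, so callers import it from there instead of from common_flags, as the two diffs below show.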
official/nlp/bert/run_pretraining.py (view file @ 184c5586)

@@ -189,7 +189,9 @@ def run_bert_pretrain(strategy, custom_callbacks=None):

       FLAGS.train_summary_interval,
       custom_callbacks=custom_callbacks,
       explicit_allreduce=FLAGS.explicit_allreduce,
-      pre_allreduce_callbacks=[common_flags.clip_by_global_norm_callback])
+      pre_allreduce_callbacks=[
+          model_training_utils.clip_by_global_norm_callback
+      ])


 def main(_):
 ...
official/nlp/bert/run_squad_helper.py (view file @ 184c5586)

@@ -278,7 +278,9 @@ def train_squad(strategy,

       run_eagerly=run_eagerly,
       custom_callbacks=custom_callbacks,
       explicit_allreduce=FLAGS.explicit_allreduce,
-      pre_allreduce_callbacks=[common_flags.clip_by_global_norm_callback])
+      pre_allreduce_callbacks=[
+          model_training_utils.clip_by_global_norm_callback
+      ])


 def prediction_output_squad(strategy, input_meta_data, tokenizer, squad_lib,
 ...
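Both updated call sites pass the callback through pre_allreduce_callbacks, i.e. it transforms the (gradient, variable) pairs before the gradients are all-reduced and applied. A rough sketch of that flow is below; the helper names and the train_step function are hypothetical illustrations, not the actual run_customized_training_loop implementation.

import tensorflow as tf


def _run_pre_allreduce_callbacks(grads_and_vars, pre_allreduce_callbacks):
  """Applies each callback in order to the (gradient, variable) pairs."""
  for callback in pre_allreduce_callbacks or []:
    grads_and_vars = callback(grads_and_vars)
  return list(grads_and_vars)


def train_step(model, optimizer, loss_fn, features, labels,
               pre_allreduce_callbacks=None):
  """One illustrative training step with optional gradient callbacks."""
  with tf.GradientTape() as tape:
    loss = loss_fn(labels, model(features, training=True))
  grads = tape.gradient(loss, model.trainable_variables)
  grads_and_vars = _run_pre_allreduce_callbacks(
      zip(grads, model.trainable_variables), pre_allreduce_callbacks)
  optimizer.apply_gradients(grads_and_vars)
  return loss

With pre_allreduce_callbacks=[model_training_utils.clip_by_global_norm_callback], every step clips the gradients to a global norm of at most 1.0 before they are applied; the behavior of the two training scripts is unchanged, only the import location of the callback moves.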