Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
48b60914
Commit
48b60914
authored
Aug 20, 2019
by
Vinh Nguyen
Browse files
minor fix automatic mixed precision - resnet
parent
80af2a7b
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
3 additions
and
4 deletions
+3
-4
official/vision/image_classification/resnet_imagenet_main.py
official/vision/image_classification/resnet_imagenet_main.py
+3
-4
No files found.
official/vision/image_classification/resnet_imagenet_main.py
View file @
48b60914
...
@@ -32,7 +32,6 @@ from official.vision.image_classification import common
 from official.vision.image_classification import imagenet_preprocessing
 from official.vision.image_classification import resnet_model
 from official.vision.image_classification import trivial_model
-import pdb
 LR_SCHEDULE = [    # (multiplier, epoch to start) tuples
     (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
 ]
...
@@ -185,11 +184,11 @@ def run(flags_obj):
     optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
         optimizer, loss_scale=flags_core.get_loss_scale(flags_obj,
                                                         default_for_fp16=128))
-    pdb.set_trace()
   if flags_obj.fp16_implementation == "graph_rewrite":
-    # Note: when flags_obj["fp16_implementation"] == "graph_rewrite",
+    # Note: when flags_obj.fp16_implementation == "graph_rewrite",
     # dtype as determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
     # which will ensure tf.keras.mixed_precision and
     # tf.train.experimental.enable_mixed_precision_graph_rewrite
     # does not double up.
     optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
         optimizer)
   if flags_obj.use_trivial_model:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment