ModelZoo / ResNet50_tensorflow / Commits

Commit ba415414 (unverified)
Authored May 31, 2019 by Haoyu Zhang; committed via GitHub on May 31, 2019
Parent: ab53cb74

Fix various lint errors (#6934)

* Fix various lint errors
* Fix logging format
Showing 6 changed files with 41 additions and 39 deletions (+41 −39).
official/recommendation/ncf_common.py              +4  −4
official/recommendation/ncf_keras_main.py          +26 −26
official/transformer/transformer_main.py           +1  −1
official/transformer/v2/data_pipeline.py           +1  −1
official/transformer/v2/metrics.py                 +7  −6
official/transformer/v2/transformer_benchmark.py   +2  −1
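Most of the changed lines normalize string literals from single to double quotes. The "Fix logging format" part of the commit replaces eager str.format() calls with lazy %-style arguments, so the message is only interpolated when the log record is actually emitted. A minimal sketch of the difference, with illustrative values not taken from the repo:

from absl import logging

epoch, train_loss = 2, 0.137  # illustrative values

# Eager: the string is built with .format() before logging.info ever runs.
logging.info("Done training epoch {}, epoch loss={}.".format(epoch + 1, train_loss))

# Lazy (what the commit switches to): arguments are passed separately and
# interpolated only when the record is emitted.
logging.info("Done training epoch %s, epoch loss=%s.", epoch + 1, train_loss)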
official/recommendation/ncf_common.py

@@ -143,7 +143,7 @@ def get_distribution_strategy(params):
           "coordinator": tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"]
       }
-      os.environ['TF_CONFIG'] = json.dumps(tf_config_env)
+      os.environ["TF_CONFIG"] = json.dumps(tf_config_env)
       distribution = tf.distribute.experimental.TPUStrategy(
           tpu_cluster_resolver, steps_per_run=100)
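The only change in this hunk is the quoting of the TF_CONFIG key, but the surrounding pattern -- serializing a small cluster description into the TF_CONFIG environment variable as JSON -- is easy to miss in a diff. A minimal sketch with a made-up coordinator address (the real one is read from tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"]):

import json
import os

# Hypothetical coordinator address, for illustration only.
tf_config_env = {"coordinator": ["10.0.0.2:8470"]}
os.environ["TF_CONFIG"] = json.dumps(tf_config_env)

print(os.environ["TF_CONFIG"])  # {"coordinator": ["10.0.0.2:8470"]}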
@@ -320,20 +320,20 @@ def define_ncf_flags():
       name="clone_model_in_keras_dist_strat",
       default=True,
       help=flags_core.help_wrap(
-          'If False, then the experimental code path is used that doesn\'t '
+          "If False, then the experimental code path is used that does not "
           "clone models for distribution."))
   flags.DEFINE_bool(
       name="early_stopping",
       default=False,
       help=flags_core.help_wrap(
-          'If True, we stop the training when it reaches hr_threshold'))
+          "If True, we stop the training when it reaches hr_threshold"))
   flags.DEFINE_bool(
       name="keras_use_ctl",
       default=False,
       help=flags_core.help_wrap(
-          'If True, we use a custom training loop for keras.'))
+          "If True, we use a custom training loop for keras."))


 def convert_to_softmax_logits(logits):
   '''Convert the logits returned by the base model to softmax logits.
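The help strings above are passed through flags_core.help_wrap, a helper from the models repo. The underlying API is absl's flags module; a self-contained sketch of the early_stopping flag without that helper (plain help text assumed) looks like this:

from absl import app, flags

flags.DEFINE_bool(
    name="early_stopping",
    default=False,
    help="If True, we stop the training when it reaches hr_threshold")

FLAGS = flags.FLAGS


def main(_):
  print("early_stopping =", FLAGS.early_stopping)


if __name__ == "__main__":
  app.run(main)  # e.g. python demo.py --early_stopping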
official/recommendation/ncf_keras_main.py

@@ -171,21 +171,21 @@ class CustomEarlyStopping(tf.keras.callbacks.Callback):
   def on_train_end(self, logs=None):
     if self.stopped_epoch > 0:
-      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
+      print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))

   def get_monitor_value(self, logs):
     logs = logs or {}
     monitor_value = logs.get(self.monitor)
     if monitor_value is None:
-      logging.warning('Early stopping conditioned on metric `%s` '
-                      'which is not available. Available metrics are: %s',
-                      self.monitor, ','.join(list(logs.keys())))
+      logging.warning("Early stopping conditioned on metric `%s` "
+                      "which is not available. Available metrics are: %s",
+                      self.monitor, ",".join(list(logs.keys())))
     return monitor_value


 def _get_keras_model(params):
   """Constructs and returns the model."""
-  batch_size = params['batch_size']
+  batch_size = params["batch_size"]

   # The input layers are of shape (1, batch_size), to match the size of the
   # input data. The first dimension is needed because the input data are
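For context, get_monitor_value is the standard Keras-callback pattern of looking the monitored metric up in the logs dict that fit() passes to callbacks. A toy early-stopping callback built on the same pattern (the class name and threshold below are illustrative, not the repo's CustomEarlyStopping):

import tensorflow as tf
from absl import logging


class ThresholdEarlyStopping(tf.keras.callbacks.Callback):
  """Stops training once the monitored metric drops below a threshold."""

  def __init__(self, monitor="val_loss", threshold=0.1):
    super(ThresholdEarlyStopping, self).__init__()
    self.monitor = monitor
    self.threshold = threshold
    self.stopped_epoch = 0

  def get_monitor_value(self, logs):
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning("Early stopping conditioned on metric `%s` "
                      "which is not available. Available metrics are: %s",
                      self.monitor, ",".join(list(logs.keys())))
    return monitor_value

  def on_epoch_end(self, epoch, logs=None):
    value = self.get_monitor_value(logs)
    if value is not None and value < self.threshold:
      self.stopped_epoch = epoch
      self.model.stop_training = True

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0:
      print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))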
@@ -240,7 +240,7 @@ def run_ncf(_):
   params = ncf_common.parse_flags(FLAGS)

-  if params['keras_use_ctl'] and int(tf.__version__.split('.')[0]) == 1:
+  if params["keras_use_ctl"] and int(tf.__version__.split(".")[0]) == 1:
     logging.error("Custom training loop only works with tensorflow 2.0 and above.")
     return
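The condition above gates the custom training loop on the TensorFlow major version parsed from tf.__version__. A standalone sketch of the same check:

import tensorflow as tf

# Major-version gate, as in the diff: the custom training loop path
# requires TF 2.x.
if int(tf.__version__.split(".")[0]) == 1:
  raise RuntimeError(
      "Custom training loop only works with tensorflow 2.0 and above.")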
@@ -248,7 +248,7 @@ def run_ncf(_):
   # ncf_common rounds eval_batch_size (this is needed due to a reshape during
   # eval). This carries over that rounding to batch_size as well. This is the
   # per device batch size
-  params['batch_size'] = params['eval_batch_size']
+  params["batch_size"] = params["eval_batch_size"]
   batch_size = params["batch_size"]

   num_users, num_items, num_train_steps, num_eval_steps, producer = (
@@ -285,7 +285,7 @@ def run_ncf(_):
       beta_2=params["beta2"],
       epsilon=params["epsilon"])

-  if params['keras_use_ctl']:
+  if params["keras_use_ctl"]:
     loss_object = tf.losses.SparseCategoricalCrossentropy(
         reduction=tf.keras.losses.Reduction.SUM,
         from_logits=True)
@@ -352,8 +352,8 @@ def run_ncf(_):
         time_callback.on_batch_begin(step + epoch * num_train_steps)
         train_loss += train_step()
         time_callback.on_batch_end(step + epoch * num_train_steps)
-      logging.info("Done training epoch {}, epoch loss={}.".format(
-          epoch + 1, train_loss / num_train_steps))
+      logging.info("Done training epoch %s, epoch loss=%s.",
+                   epoch + 1, train_loss / num_train_steps)
       eval_input_iterator.initialize()
       hr_sum = 0
       hr_count = 0
@@ -361,8 +361,7 @@ def run_ncf(_):
         step_hr_sum, step_hr_count = eval_step()
         hr_sum += step_hr_sum
         hr_count += step_hr_count
-      logging.info("Done eval epoch {}, hr={}.".format(
-          epoch + 1, hr_sum / hr_count))
+      logging.info("Done eval epoch %s, hr=%s.", epoch + 1, hr_sum / hr_count)

       if (FLAGS.early_stopping and
           float(hr_sum / hr_count) > params["hr_threshold"]):
@@ -399,7 +398,7 @@ def run_ncf(_):
     if history and history.history:
       train_history = history.history
-      train_loss = train_history['loss'][-1]
+      train_loss = train_history["loss"][-1]

   stats = build_stats(train_loss, eval_results, time_callback)
   return stats
@@ -410,26 +409,27 @@ def build_stats(loss, eval_result, time_callback):
   Args:
     loss: The final loss at training time.
-    eval_output: Output of the eval step. Assumes first value is eval_loss and
+    eval_result: Output of the eval step. Assumes first value is eval_loss and
       second value is accuracy_top_1.
     time_callback: Time tracking callback likely used during keras.fit.

   Returns:
     Dictionary of normalized results.
   """
   stats = {}
   if loss:
-    stats['loss'] = loss
+    stats["loss"] = loss

   if eval_result:
-    stats['eval_loss'] = eval_result[0]
-    stats['eval_hit_rate'] = eval_result[1]
+    stats["eval_loss"] = eval_result[0]
+    stats["eval_hit_rate"] = eval_result[1]

   if time_callback:
     timestamp_log = time_callback.timestamp_log
-    stats['step_timestamp_log'] = timestamp_log
-    stats['train_finish_time'] = time_callback.train_finish_time
+    stats["step_timestamp_log"] = timestamp_log
+    stats["train_finish_time"] = time_callback.train_finish_time
     if len(timestamp_log) > 1:
-      stats['avg_exp_per_second'] = (
+      stats["avg_exp_per_second"] = (
           time_callback.batch_size * time_callback.log_steps *
           (len(time_callback.timestamp_log) - 1) /
           (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
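The avg_exp_per_second entry divides the number of examples processed between the first and last logged timestamps by the elapsed wall time. A worked sketch with a hypothetical stand-in for the time callback's timestamp_log entries:

import collections

# Hypothetical stand-in for the time callback's timestamp_log entries.
BatchTimestamp = collections.namedtuple("BatchTimestamp",
                                        ["batch_index", "timestamp"])

batch_size = 1024   # examples per step (illustrative)
log_steps = 100     # steps between logged timestamps (illustrative)
timestamp_log = [BatchTimestamp(0, 100.0),
                 BatchTimestamp(100, 104.0),
                 BatchTimestamp(200, 108.0)]

# Same arithmetic as the diff: examples processed between the first and last
# log entries, divided by the elapsed seconds.
avg_exp_per_second = (batch_size * log_steps * (len(timestamp_log) - 1) /
                      (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
print(avg_exp_per_second)  # 1024 * 100 * 2 / 8.0 = 25600.0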
official/transformer/transformer_main.py

@@ -583,7 +583,7 @@ def run_transformer(flags_obj):
   params["static_batch"] = flags_obj.static_batch or params["use_tpu"]
   params["allow_ffn_pad"] = not params["use_tpu"]
-  params["max_length"] = flags_obj.max_length or params['max_length']
+  params["max_length"] = flags_obj.max_length or params["max_length"]

   params["use_synthetic_data"] = flags_obj.use_synthetic_data
official/transformer/v2/data_pipeline.py

@@ -164,7 +164,7 @@ def _batch_examples(dataset, batch_size, max_length):
     """Return int64 bucket id for this example, calculated based on length."""
     seq_length = _get_example_length((example_input, example_target))

-    # TODO: investigate whether removing code branching improves performance.
+    # TODO(xunkai): investigate if removing code branching improves performance.
     conditions_c = tf.logical_and(
         tf.less_equal(buckets_min, seq_length),
         tf.less(seq_length, buckets_max))
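conditions_c marks, for every bucket, whether buckets_min <= seq_length < buckets_max; the bucket id is then the index of the matching bucket. A small sketch with made-up boundaries (assumes TF 2.x eager execution; the real boundaries are derived from max_length):

import tensorflow as tf

# Illustrative bucket boundaries.
buckets_min = tf.constant([0, 8, 16], dtype=tf.int64)
buckets_max = tf.constant([8, 16, 32], dtype=tf.int64)
seq_length = tf.constant(10, dtype=tf.int64)

# True exactly for the bucket whose [min, max) range contains seq_length.
conditions_c = tf.logical_and(tf.less_equal(buckets_min, seq_length),
                              tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
print(conditions_c.numpy(), bucket_id.numpy())  # [False  True False] 1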
official/transformer/v2/metrics.py

@@ -160,9 +160,10 @@ class MetricLayer(tf.keras.layers.Layer):
   def call(self, inputs):
     logits, targets = inputs[0], inputs[1]
-    # TODO(guptapriya): Remove this check when underlying issue to create metrics
-    # with dist strat in cross replica context is fixed.
-    if tf.distribute.has_strategy() and not tf.distribute.in_cross_replica_context():
+    # TODO(guptapriya): Remove this check when underlying issue to create
+    # metrics with dist strat in cross replica context is fixed.
+    if (tf.distribute.has_strategy() and
+        not tf.distribute.in_cross_replica_context()):
       for mean, fn in self.metric_mean_fns:
         m = mean(*fn(logits, targets))
         self.add_metric(m)
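The rewrapped condition only adds metrics when a strategy is active and the layer is running in replica context (i.e. inside the step function the strategy executes), not in the cross-replica context that strategy.scope() provides. A quick sketch of how the two predicates behave, assuming TF 2.x and MirroredStrategy on whatever devices are available:

import tensorflow as tf

print(tf.distribute.has_strategy())                # False: no strategy active yet

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  print(tf.distribute.has_strategy())              # True
  print(tf.distribute.in_cross_replica_context())  # True: scope() is cross-replica,
                                                   # so the diff's condition is False here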
official/transformer/v2/transformer_benchmark.py

@@ -225,6 +225,7 @@ class TransformerBaseKerasAccuracy(TransformerBenchmark):
                           bleu_min=27,
                           bleu_max=28)


 class TransformerBigKerasAccuracy(TransformerBenchmark):
   """Benchmark accuracy tests for Transformer Big model w/ Keras."""