ModelZoo / ResNet50_tensorflow · Commit 7ebfc3dd (unverified)

Authored Feb 23, 2020 by Ayushman Kumar; committed via GitHub on Feb 23, 2020.
Parents: 867f0c47, 6f0e3a0b

    Merge pull request #1 from tensorflow/master

    updated

Showing 11 changed files with 161 additions and 98 deletions:

  official/nlp/modeling/layers/transformer.py                        +19  -16
  official/staging/training/utils.py                                 +10   -2
  official/utils/misc/keras_utils.py                                 +66  -21
  official/vision/detection/configs/maskrcnn_config.py                +3   -1
  official/vision/detection/configs/retinanet_config.py               +3   -3
  official/vision/detection/modeling/base_model.py                   +13   -9
  official/vision/detection/modeling/retinanet_model.py               +1   -2
  official/vision/image_classification/common.py                      +7   -6
  official/vision/image_classification/resnet_ctl_imagenet_main.py    +6  -11
  official/vision/image_classification/resnet_runnable.py             +5  -12
  research/audioset/yamnet/yamnet_visualization.ipynb                +28  -15
official/nlp/modeling/layers/transformer.py

@@ -193,6 +193,7 @@ class Transformer(tf.keras.layers.Layer):
     base_config = super(Transformer, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
 
+  @tf.function(experimental_compile=True)
   def call(self, inputs):
     if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
       input_tensor, attention_mask = inputs
@@ -204,19 +205,21 @@ class Transformer(tf.keras.layers.Layer):
     if attention_mask is not None:
       attention_inputs.append(attention_mask)
 
-    attention_output = self._attention_layer(attention_inputs)
-    attention_output = self._attention_output_dense(attention_output)
-    attention_output = self._attention_dropout(attention_output)
-    attention_output = self._attention_layer_norm(input_tensor +
-                                                  attention_output)
-    intermediate_output = self._intermediate_dense(attention_output)
-    intermediate_output = self._intermediate_activation_layer(
-        intermediate_output)
-    layer_output = self._output_dense(intermediate_output)
-    layer_output = self._output_dropout(layer_output)
-    # During mixed precision training, attention_output is from layer norm and
-    # is always fp32 for now. cast layer_output to fp32 for the subsequent add.
-    layer_output = tf.cast(layer_output, tf.float32)
-    layer_output = self._output_layer_norm(layer_output + attention_output)
+    with tf.name_scope(self.name):
+      attention_output = self._attention_layer(attention_inputs)
+      attention_output = self._attention_output_dense(attention_output)
+      attention_output = self._attention_dropout(attention_output)
+      attention_output = self._attention_layer_norm(input_tensor +
+                                                    attention_output)
+      intermediate_output = self._intermediate_dense(attention_output)
+      intermediate_output = self._intermediate_activation_layer(
+          intermediate_output)
+      layer_output = self._output_dense(intermediate_output)
+      layer_output = self._output_dropout(layer_output)
+      # During mixed precision training, attention_output is from layer norm
+      # and is always fp32 for now. Cast layer_output to fp32 for the
+      # subsequent add.
+      layer_output = tf.cast(layer_output, tf.float32)
+      layer_output = self._output_layer_norm(layer_output + attention_output)
 
     return layer_output
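
The added decorator asks TensorFlow to compile the layer's call with XLA. A minimal sketch of the same flag on a standalone function (illustrative code, not from this commit; in later TF releases the flag was renamed `jit_compile`):

    import tensorflow as tf

    @tf.function(experimental_compile=True)
    def dense_relu(x, w):
      # The whole body is fused and compiled by XLA as one computation.
      return tf.nn.relu(tf.matmul(x, w))

    x = tf.random.normal([8, 16])
    w = tf.random.normal([16, 4])
    print(dense_relu(x, w).shape)  # (8, 4)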
official/staging/training/utils.py

@@ -298,13 +298,16 @@ class EpochHelper(object):
     self._epoch_steps = epoch_steps
     self._global_step = global_step
     self._current_epoch = None
+    self._epoch_start_step = None
     self._in_epoch = False
 
   def epoch_begin(self):
     """Returns whether a new epoch should begin."""
     if self._in_epoch:
       return False
-    self._current_epoch = self._global_step.numpy() / self._epoch_steps
+    current_step = self._global_step.numpy()
+    self._epoch_start_step = current_step
+    self._current_epoch = current_step // self._epoch_steps
     self._in_epoch = True
     return True
@@ -313,13 +316,18 @@ class EpochHelper(object):
     if not self._in_epoch:
       raise ValueError("`epoch_end` can only be called inside an epoch")
     current_step = self._global_step.numpy()
-    epoch = current_step / self._epoch_steps
+    epoch = current_step // self._epoch_steps
     if epoch > self._current_epoch:
       self._in_epoch = False
       return True
     return False
 
+  @property
+  def batch_index(self):
+    """Index of the next batch within the current epoch."""
+    return self._global_step.numpy() - self._epoch_start_step
+
   @property
   def current_epoch(self):
     return self._current_epoch
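
The `/` to `//` change replaces a float epoch index with an integer one; with true division, `epoch > self._current_epoch` in `epoch_end` could become true after a single inner loop rather than a full epoch. A sketch with assumed values (not from the commit):

    import tensorflow as tf

    global_step = tf.Variable(250, dtype=tf.int64)  # illustrative value
    epoch_steps = 100

    print(global_step.numpy() / epoch_steps)    # old: 2.5 (float epoch)
    print(global_step.numpy() // epoch_steps)   # new: 2 (integer epoch)

    # batch_index counts steps since epoch_begin() recorded the start step.
    epoch_start_step = 200                       # assumed start of epoch 2
    print(global_step.numpy() - epoch_start_step)  # 50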
official/utils/misc/keras_utils.py

@@ -26,7 +26,7 @@ from absl import logging
 import tensorflow as tf
 
 from tensorflow.core.protobuf import rewriter_config_pb2
 from tensorflow.python import tf2
-from tensorflow.python.eager import profiler
+from tensorflow.python.profiler import profiler_v2 as profiler
 
 
 class BatchTimestamp(object):
@@ -44,17 +44,28 @@ class BatchTimestamp(object):
 
 class TimeHistory(tf.keras.callbacks.Callback):
   """Callback for Keras models."""
 
-  def __init__(self, batch_size, log_steps):
+  def __init__(self, batch_size, log_steps, logdir=None):
     """Callback for logging performance.
 
     Args:
       batch_size: Total batch size.
       log_steps: Interval of steps between logging of batch level stats.
+      logdir: Optional directory to write TensorBoard summaries.
     """
+    # TODO(wcromar): remove this parameter and rely on `logs` parameter of
+    # on_train_batch_end()
     self.batch_size = batch_size
     super(TimeHistory, self).__init__()
     self.log_steps = log_steps
-    self.global_steps = 0
+    self.last_log_step = 0
+    self.steps_before_epoch = 0
+    self.steps_in_epoch = 0
+    self.start_time = None
+
+    if logdir:
+      self.summary_writer = tf.summary.create_file_writer(logdir)
+    else:
+      self.summary_writer = None
 
     # Logs start of step 1 then end of each step based on log_steps interval.
     self.timestamp_log = []
@@ -62,38 +73,70 @@ class TimeHistory(tf.keras.callbacks.Callback):
     # Records the time each epoch takes to run from start to finish of epoch.
     self.epoch_runtime_log = []
 
+  @property
+  def global_steps(self):
+    """The current 1-indexed global step."""
+    return self.steps_before_epoch + self.steps_in_epoch
+
+  @property
+  def average_steps_per_second(self):
+    """The average training steps per second across all epochs."""
+    return self.global_steps / sum(self.epoch_runtime_log)
+
+  @property
+  def average_examples_per_second(self):
+    """The average number of training examples per second across all epochs."""
+    return self.average_steps_per_second * self.batch_size
+
   def on_train_end(self, logs=None):
     self.train_finish_time = time.time()
 
+    if self.summary_writer:
+      self.summary_writer.flush()
+
   def on_epoch_begin(self, epoch, logs=None):
     self.epoch_start = time.time()
 
   def on_batch_begin(self, batch, logs=None):
-    self.global_steps += 1
-    if self.global_steps == 1:
+    if not self.start_time:
       self.start_time = time.time()
-      self.timestamp_log.append(BatchTimestamp(self.global_steps,
-                                               self.start_time))
+
+    # Record the timestamp of the first global step
+    if not self.timestamp_log:
+      self.timestamp_log.append(BatchTimestamp(self.global_steps,
+                                               self.start_time))
 
   def on_batch_end(self, batch, logs=None):
     """Records elapse time of the batch and calculates examples per second."""
-    if self.global_steps % self.log_steps == 0:
-      timestamp = time.time()
-      elapsed_time = timestamp - self.start_time
-      examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
-      self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))
-      logging.info(
-          "BenchmarkMetric: {'global step':%d, 'time_taken': %f,"
-          "'examples_per_second': %f}",
-          self.global_steps, elapsed_time, examples_per_second)
-      self.start_time = timestamp
+    self.steps_in_epoch = batch + 1
+    steps_since_last_log = self.global_steps - self.last_log_step
+    if steps_since_last_log >= self.log_steps:
+      now = time.time()
+      elapsed_time = now - self.start_time
+      steps_per_second = steps_since_last_log / elapsed_time
+      examples_per_second = steps_per_second * self.batch_size
+
+      self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
+      logging.info(
+          "TimeHistory: %.2f examples/second between steps %d and %d",
+          examples_per_second, self.last_log_step, self.global_steps)
+
+      if self.summary_writer:
+        with self.summary_writer.as_default():
+          tf.summary.scalar('global_step/sec', steps_per_second,
+                            self.global_steps)
+          tf.summary.scalar('examples/sec', examples_per_second,
+                            self.global_steps)
+
+      self.last_log_step = self.global_steps
+      self.start_time = None
 
   def on_epoch_end(self, epoch, logs=None):
     epoch_run_time = time.time() - self.epoch_start
     self.epoch_runtime_log.append(epoch_run_time)
-    logging.info(
-        "BenchmarkMetric: {'epoch':%d, 'time_taken': %f}",
-        epoch, epoch_run_time)
+
+    self.steps_before_epoch += self.steps_in_epoch
+    self.steps_in_epoch = 0
 
 
 def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
@@ -145,15 +188,17 @@ class ProfilerCallback(tf.keras.callbacks.Callback):
   def on_batch_begin(self, batch, logs=None):
     if batch == self.start_step_in_epoch and self.should_start:
       self.should_start = False
-      profiler.start()
+      profiler.start(self.log_dir)
       logging.info('Profiler started at Step %s', self.start_step)
 
   def on_batch_end(self, batch, logs=None):
     if batch == self.stop_step_in_epoch and self.should_stop:
       self.should_stop = False
-      results = profiler.stop()
-      profiler.save(self.log_dir, results)
+      profiler.stop()
       logging.info(
           'Profiler saved profiles for steps between %s and %s to %s',
           self.start_step, self.stop_step, self.log_dir)
 
 
 def set_session_config(enable_eager=False,
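
A hedged usage sketch for the extended TimeHistory callback; the toy model, data, and logdir path are illustrative, and the import assumes tensorflow/models is on PYTHONPATH:

    import tensorflow as tf
    from official.utils.misc import keras_utils  # assumes the models repo is importable

    model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
    model.compile(optimizer='sgd', loss='mse')

    # With logdir set, the callback also writes 'global_step/sec' and
    # 'examples/sec' TensorBoard scalars; with logdir=None it only logs.
    time_callback = keras_utils.TimeHistory(batch_size=32, log_steps=10,
                                            logdir='/tmp/tb_logs')

    x = tf.random.normal([320, 8])
    y = tf.random.normal([320, 4])
    model.fit(x, y, batch_size=32, epochs=2, callbacks=[time_callback])
    print(time_callback.average_examples_per_second)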
official/vision/detection/configs/maskrcnn_config.py

@@ -14,8 +14,9 @@
 # ==============================================================================
 """Config template to train Mask R-CNN."""
 
+from official.modeling.hyperparams import params_dict
 from official.vision.detection.configs import base_config
-from official.modeling.hyperparams import params_dict
 
 # pylint: disable=line-too-long
 MASKRCNN_CFG = params_dict.ParamsDict(base_config.BASE_CFG)
@@ -23,6 +24,7 @@ MASKRCNN_CFG.override({
     'type': 'mask_rcnn',
     'eval': {
         'type': 'box_and_mask',
+        'num_images_to_visualize': 0,
    },
    'architecture': {
        'parser': 'maskrcnn_parser',
official/vision/detection/configs/retinanet_config.py

@@ -23,9 +23,8 @@
 # need to be fine-tuned for the detection task.
 # Note that we need to trailing `/` to avoid the incorrect match.
 # [1]: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/config.py#L198
-RESNET50_FROZEN_VAR_PREFIX = r'(resnet\d+/)conv2d(|_([1-9]|10))\/'
 RESNET_FROZEN_VAR_PREFIX = r'(resnet\d+)\/(conv2d(|_([1-9]|10))|batch_normalization(|_([1-9]|10)))\/'
 REGULARIZATION_VAR_REGEX = r'.*(kernel|weight):0$'
 
 # pylint: disable=line-too-long
 RETINANET_CFG = {
@@ -54,10 +53,11 @@ RETINANET_CFG = {
         'path': '',
         'prefix': '',
     },
-    'frozen_variable_prefix': RESNET50_FROZEN_VAR_PREFIX,
+    'frozen_variable_prefix': RESNET_FROZEN_VAR_PREFIX,
     'train_file_pattern': '',
     # TODO(b/142174042): Support transpose_input option.
     'transpose_input': False,
+    'regularization_variable_regex': REGULARIZATION_VAR_REGEX,
     'l2_weight_decay': 0.0001,
     'input_sharding': False,
    },
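
The dropped prefix matched only the first ten conv2d layers; the kept one also freezes matching batch-normalization variables. A quick check of the new pattern (the variable names below are illustrative):

    import re

    RESNET_FROZEN_VAR_PREFIX = (
        r'(resnet\d+)\/(conv2d(|_([1-9]|10))|batch_normalization(|_([1-9]|10)))\/')

    for name in ('resnet50/conv2d_3/kernel:0',
                 'resnet50/batch_normalization_7/gamma:0',
                 'resnet50/conv2d_11/kernel:0'):
      print(name, bool(re.match(RESNET_FROZEN_VAR_PREFIX, name)))
    # True, True, False -- only the _1.._10 suffixes match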
official/vision/detection/modeling/base_model.py

@@ -18,11 +18,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
 import abc
 import functools
 import re
 
-import six
-
 from absl import logging
 import tensorflow.compat.v2 as tf
@@ -53,7 +51,7 @@ class OptimizerFactory(object):
       self._optimizer = tf.keras.optimizers.Adagrad
     elif params.type == 'rmsprop':
       self._optimizer = functools.partial(
-          tf.keras.optimizers.RMSProp, momentum=params.momentum)
+          tf.keras.optimizers.RMSprop, momentum=params.momentum)
     else:
       raise ValueError('Unsupported optimizer type %s.' % self._optimizer)
@@ -104,6 +102,7 @@ class Model(object):
         params.train.learning_rate)
 
     self._frozen_variable_prefix = params.train.frozen_variable_prefix
+    self._regularization_var_regex = params.train.regularization_variable_regex
     self._l2_weight_decay = params.train.l2_weight_decay
 
     # Checkpoint restoration.
@@ -146,12 +145,17 @@ class Model(object):
     """
     return _make_filter_trainable_variables_fn(self._frozen_variable_prefix)
 
-  def weight_decay_loss(self, l2_weight_decay, trainable_variables):
-    return l2_weight_decay * tf.add_n([
-        tf.nn.l2_loss(v)
-        for v in trainable_variables
-        if 'batch_normalization' not in v.name and 'bias' not in v.name
-    ])
+  def weight_decay_loss(self, trainable_variables):
+    reg_variables = [
+        v for v in trainable_variables
+        if self._regularization_var_regex is None
+        or re.match(self._regularization_var_regex, v.name)
+    ]
+    logging.info('Regularization Variables: %s',
+                 [v.name for v in reg_variables])
+
+    return self._l2_weight_decay * tf.add_n(
+        [tf.nn.l2_loss(v) for v in reg_variables])
 
   def make_restore_checkpoint_fn(self):
     """Returns scaffold function to restore parameters from v1 checkpoint."""
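
The new opt-in regex selects regularized variables explicitly, replacing the old hard-coded exclusion of 'batch_normalization' and 'bias'. A small sketch with illustrative variable names:

    import re

    REGULARIZATION_VAR_REGEX = r'.*(kernel|weight):0$'

    names = ['conv2d/kernel:0', 'conv2d/bias:0',
             'batch_normalization/gamma:0', 'dense/kernel:0']
    print([n for n in names if re.match(REGULARIZATION_VAR_REGEX, n)])
    # ['conv2d/kernel:0', 'dense/kernel:0'] -- only kernels/weights get L2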
official/vision/detection/modeling/retinanet_model.py

@@ -106,8 +106,7 @@ class RetinanetModel(base_model.Model):
                                 labels['box_targets'],
                                 labels['num_positives'])
     model_loss = cls_loss + self._box_loss_weight * box_loss
 
-    l2_regularization_loss = self.weight_decay_loss(self._l2_weight_decay,
-                                                    trainable_variables)
+    l2_regularization_loss = self.weight_decay_loss(trainable_variables)
     total_loss = model_loss + l2_regularization_loss
     return {
         'total_loss': total_loss,
official/vision/image_classification/common.py

@@ -188,7 +188,10 @@ def get_callbacks(
     enable_checkpoint_and_export=False,
     model_dir=None):
   """Returns common callbacks."""
-  time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
+  time_callback = keras_utils.TimeHistory(
+      FLAGS.batch_size,
+      FLAGS.log_steps,
+      logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None)
   callbacks = [time_callback]
 
   if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
@@ -265,11 +268,9 @@ def build_stats(history, eval_output, callbacks):
       timestamp_log = callback.timestamp_log
       stats['step_timestamp_log'] = timestamp_log
       stats['train_finish_time'] = callback.train_finish_time
-      if len(timestamp_log) > 1:
-        stats['avg_exp_per_second'] = (
-            callback.batch_size * callback.log_steps *
-            (len(callback.timestamp_log) - 1) /
-            (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
+      if callback.epoch_runtime_log:
+        stats['avg_exp_per_second'] = callback.average_examples_per_second
 
   return stats
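
The removed timestamp-window arithmetic is replaced by TimeHistory's epoch-runtime average. A worked sketch of the new formula with made-up numbers:

    steps = 1000                       # global_steps after training (assumed)
    epoch_runtime_log = [42.0, 40.0]   # seconds per epoch (assumed)
    batch_size = 64

    average_steps_per_second = steps / sum(epoch_runtime_log)        # ~12.2
    average_examples_per_second = average_steps_per_second * batch_size
    print(round(average_examples_per_second, 1))                     # 780.5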
official/vision/image_classification/resnet_ctl_imagenet_main.py

@@ -64,15 +64,8 @@ def build_stats(runnable, time_callback):
     timestamp_log = time_callback.timestamp_log
     stats['step_timestamp_log'] = timestamp_log
     stats['train_finish_time'] = time_callback.train_finish_time
-    if len(timestamp_log) > 1:
-      stats['avg_exp_per_second'] = (
-          time_callback.batch_size * time_callback.log_steps *
-          (len(time_callback.timestamp_log) - 1) /
-          (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
-    avg_exp_per_second = tf.reduce_mean(
-        runnable.examples_per_second_history).numpy()
-    stats['avg_exp_per_second'] = avg_exp_per_second
+    if time_callback.epoch_runtime_log:
+      stats['avg_exp_per_second'] = time_callback.average_examples_per_second
 
   return stats
@@ -154,8 +147,10 @@ def run(flags_obj):
       'total steps: %d; Eval %d steps',
       train_epochs, per_epoch_steps,
       train_epochs * per_epoch_steps, eval_steps)
 
-  time_callback = keras_utils.TimeHistory(flags_obj.batch_size,
-                                          flags_obj.log_steps)
+  time_callback = keras_utils.TimeHistory(
+      flags_obj.batch_size,
+      flags_obj.log_steps,
+      logdir=flags_obj.model_dir if flags_obj.enable_tensorboard else None)
 
   with distribution_utils.get_strategy_scope(strategy):
     runnable = resnet_runnable.ResnetRunnable(flags_obj, time_callback,
                                               per_epoch_steps)
official/vision/image_classification/resnet_runnable.py

@@ -114,7 +114,6 @@ class ResnetRunnable(standard_runnable.StandardTrainable,
     # Handling epochs.
     self.epoch_steps = epoch_steps
     self.epoch_helper = utils.EpochHelper(epoch_steps, self.global_step)
-    self.examples_per_second_history = []
 
   def build_train_dataset(self):
     """See base class."""
@@ -147,8 +146,8 @@ class ResnetRunnable(standard_runnable.StandardTrainable,
     self.train_loss.reset_states()
     self.train_accuracy.reset_states()
 
-    self.time_callback.on_batch_begin(self.global_step)
     self._epoch_begin()
+    self.time_callback.on_batch_begin(self.epoch_helper.batch_index)
 
   def train_step(self, iterator):
     """See base class."""
@@ -194,12 +193,13 @@ class ResnetRunnable(standard_runnable.StandardTrainable,
   def train_loop_end(self):
     """See base class."""
-    self.time_callback.on_batch_end(self.global_step)
-    self._epoch_end()
-    return {
+    metrics = {
         'train_loss': self.train_loss.result(),
         'train_accuracy': self.train_accuracy.result(),
     }
+    self.time_callback.on_batch_end(self.epoch_helper.batch_index - 1)
+    self._epoch_end()
+    return metrics
 
   def eval_begin(self):
     """See base class."""
@@ -234,10 +234,3 @@ class ResnetRunnable(standard_runnable.StandardTrainable,
   def _epoch_end(self):
     if self.epoch_helper.epoch_end():
       self.time_callback.on_epoch_end(self.epoch_helper.current_epoch)
-      epoch_time = self.time_callback.epoch_runtime_log[-1]
-      steps_per_second = self.epoch_steps / epoch_time
-      examples_per_second = steps_per_second * self.flags_obj.batch_size
-      self.examples_per_second_history.append(examples_per_second)
-      tf.summary.scalar('global_step/sec', steps_per_second)
-      tf.summary.scalar('examples/sec', examples_per_second)
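
Why the callback arguments changed, sketched with assumed values (not from the commit): EpochHelper.batch_index is the index of the *next* batch within the epoch, so the step just completed is batch_index - 1.

    epoch_steps = 100
    epoch_start_step = 100   # recorded by EpochHelper.epoch_begin()
    global_step = 150        # before running the next inner loop

    batch_index = global_step - epoch_start_step   # 50: index of the NEXT batch
    # on_batch_begin fires with 50; after the loop advances the global step,
    # on_batch_end fires with batch_index - 1, the batch that just finished.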
research/audioset/yamnet/yamnet_visualization.ipynb

@@ -53,6 +53,34 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Sample rate = 16000\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Read in the audio.\n",
+    "# You can get this example waveform via:\n",
+    "# curl -O https://storage.googleapis.com/audioset/speech_whistling2.wav\n",
+    "\n",
+    "wav_file_name = 'speech_whistling2.wav'\n",
+    "\n",
+    "wav_data, sr = sf.read(wav_file_name, dtype=np.int16)\n",
+    "waveform = wav_data / 32768.0\n",
+    "# The graph is designed for a sampling rate of 16 kHz, but higher rates \n",
+    "# should work too.\n",
+    "params.SAMPLE_RATE = sr\n",
+    "print(\"Sample rate =\", params.SAMPLE_RATE)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -74,21 +102,6 @@
    " yamnet.load_weights('yamnet.h5')"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Read in the audio.\n",
-    "# You can get this example waveform via:\n",
-    "# curl -O https://storage.googleapis.com/audioset/speech_whistling2.wav\n",
-    "wav_data, sr = sf.read('speech_whistling2.wav', dtype=np.int16)\n",
-    "waveform = wav_data / 32768.0\n",
-    "# Sampling rate should be 16000 Hz.\n",
-    "assert sr == 16000"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 4,
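
The relocated notebook cell, as a standalone sketch runnable outside the notebook. It assumes the soundfile package is installed and the example wav has been downloaded as the cell's comment shows:

    import numpy as np
    import soundfile as sf

    wav_data, sr = sf.read('speech_whistling2.wav', dtype=np.int16)
    waveform = wav_data / 32768.0  # scale int16 PCM to floats in [-1.0, 1.0]
    # The notebook now sets params.SAMPLE_RATE = sr instead of asserting 16 kHz.
    print('Sample rate =', sr)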