ModelZoo / ResNet50_tensorflow / Commits / a44be35f

Commit a44be35f, authored Oct 08, 2021 by A. Unique TensorFlower, committed by saberkun on Oct 08, 2021

Internal change

PiperOrigin-RevId: 401839863
parent 21ce83d8
Showing 2 changed files with 26 additions and 50 deletions (+26 −50):

  official/vision/beta/projects/yt8m/tasks/yt8m_task.py  (+24 −9)
  official/vision/beta/projects/yt8m/train.py            (+2 −41)
official/vision/beta/projects/yt8m/tasks/yt8m_task.py (+24 −9)

@@ -24,7 +24,7 @@ from official.vision.beta.projects.yt8m.configs import yt8m as yt8m_cfg
 from official.vision.beta.projects.yt8m.dataloaders import yt8m_input
 from official.vision.beta.projects.yt8m.eval_utils import eval_util
 from official.vision.beta.projects.yt8m.modeling import yt8m_model_utils as utils
-from official.vision.beta.projects.yt8m.modeling.yt8m_model import YT8MModel
+from official.vision.beta.projects.yt8m.modeling.yt8m_model import DbofModel


 @task_factory.register_task_cls(yt8m_cfg.YT8MTask)
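This hunk is just the class rename: the task now imports DbofModel where it previously imported YT8MModel. If out-of-tree code still imported the old name, a temporary alias would keep it working during the migration; a minimal sketch (hypothetical, not part of this commit):

# Hypothetical migration shim, not part of this commit: re-export the renamed
# class under its old name so external imports keep resolving.
from official.vision.beta.projects.yt8m.modeling.yt8m_model import DbofModel

YT8MModel = DbofModel  # Deprecated alias; new code should import DbofModel.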
@@ -40,13 +40,26 @@ class YT8MTask(base_task.Task):
     input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
     logging.info('Build model input %r', common_input_shape)

     l2_weight_decay = self.task_config.losses.l2_weight_decay
     # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
     # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
     # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
     l2_regularizer = (
         tf.keras.regularizers.l2(l2_weight_decay / 2.0)
         if l2_weight_decay else None)

     # Model configuration.
     model_config = self.task_config.model
-    model = YT8MModel(
-        input_params=model_config,
-        norm_activation_config=model_config.norm_activation,
-        num_frames=train_cfg.num_frames,
-        num_classes=train_cfg.num_classes)
+    norm_activation_config = model_config.norm_activation
+    model = DbofModel(
+        params=model_config,
+        input_specs=input_specs,
+        num_frames=train_cfg.num_frames,
+        num_classes=train_cfg.num_classes,
+        activation=norm_activation_config.activation,
+        use_sync_bn=norm_activation_config.use_sync_bn,
+        norm_momentum=norm_activation_config.norm_momentum,
+        norm_epsilon=norm_activation_config.norm_epsilon,
+        kernel_regularizer=l2_regularizer)
     return model

   def build_inputs(self, params: yt8m_cfg.DataConfig, input_context=None):
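The / 2.0 in the regularizer reflects the factor-of-two difference between the two APIs the comment links to: tf.keras.regularizers.l2(l) penalizes l * sum(w ** 2), while tf.nn.l2_loss(w) computes sum(w ** 2) / 2. A quick standalone sanity check (not from this commit) showing the two penalties agree:

import numpy as np
import tensorflow as tf

w = tf.constant(np.random.rand(4, 3).astype(np.float32))
l2_weight_decay = 1e-4

# tf.keras.regularizers.l2(l) computes l * sum(w ** 2), so halving the
# decay reproduces the tf.nn.l2_loss convention.
keras_penalty = tf.keras.regularizers.l2(l2_weight_decay / 2.0)(w)

# tf.nn.l2_loss(w) computes sum(w ** 2) / 2; scaling by the full decay
# yields the same value.
nn_penalty = l2_weight_decay * tf.nn.l2_loss(w)

np.testing.assert_allclose(keras_penalty.numpy(), nn_penalty.numpy(),
                           rtol=1e-6)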
@@ -163,9 +176,10 @@ class YT8MTask(base_task.Task):
       num_frames = tf.cast(num_frames, tf.float32)
       sample_frames = self.task_config.train_data.num_frames
       if self.task_config.model.sample_random_frames:
-        features = utils.SampleRandomFrames(features, num_frames, sample_frames)
+        features = utils.sample_random_frames(features, num_frames,
+                                              sample_frames)
       else:
-        features = utils.SampleRandomSequence(features, num_frames,
-                                              sample_frames)
+        features = utils.sample_random_sequence(features, num_frames,
+                                                sample_frames)

     num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
     with tf.GradientTape() as tape:
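Both call sites simply switch from the old CamelCase helper names to PEP 8 snake_case; the behavior is unchanged. For context, random frame sampling draws sample_frames frame indices per example from that example's valid range, along these lines (a simplified sketch, not the actual code in yt8m_model_utils):

import tensorflow as tf

def sample_random_frames(features, num_frames, sample_frames):
  """Simplified sketch: pick `sample_frames` random frames per example.

  features:      [batch, max_frames, feature_dim] frame-level features.
  num_frames:    [batch, 1] float tensor of valid frame counts per example.
  sample_frames: python int, number of frames to draw.
  """
  batch_size = tf.shape(features)[0]
  # Draw uniform positions in [0, num_frames) independently per example.
  rand = tf.random.uniform([batch_size, sample_frames])
  frame_idx = tf.cast(rand * tf.tile(num_frames, [1, sample_frames]), tf.int32)
  batch_idx = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, sample_frames])
  # Gather the chosen frames: result is [batch, sample_frames, feature_dim].
  return tf.gather_nd(features, tf.stack([batch_idx, frame_idx], axis=2))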
@@ -237,9 +251,10 @@ class YT8MTask(base_task.Task):
       # sample random frames (None, 5, 1152) -> (None, 30, 1152)
       sample_frames = self.task_config.validation_data.num_frames
       if self.task_config.model.sample_random_frames:
-        features = utils.SampleRandomFrames(features, num_frames, sample_frames)
+        features = utils.sample_random_frames(features, num_frames,
+                                              sample_frames)
       else:
-        features = utils.SampleRandomSequence(features, num_frames,
-                                              sample_frames)
+        features = utils.sample_random_sequence(features, num_frames,
+                                                sample_frames)

     outputs = self.inference_step(features, model)
     outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
@@ -276,7 +291,7 @@ class YT8MTask(base_task.Task):
           predictions=step_logs[self.avg_prec_metric.name][1])
     return state

-  def reduce_aggregated_logs(self, aggregated_logs):
+  def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
     avg_prec_metrics = self.avg_prec_metric.get()
     self.avg_prec_metric.clear()
     return avg_prec_metrics
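The override gains a defaulted global_step=None parameter, presumably to match an updated base-class interface whose evaluation driver now forwards the current step; callers that omit it keep working. The pattern in isolation (illustrative sketch, class name hypothetical):

# Illustrative sketch: widening an overridden method with a defaulted kwarg
# so one-argument callers and two-argument callers both keep working.
class AveragePrecisionTask:

  def __init__(self, metric):
    self.avg_prec_metric = metric  # e.g. an object with get() and clear()

  def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
    del aggregated_logs, global_step  # Unused: the metric accumulates state.
    results = self.avg_prec_metric.get()
    self.avg_prec_metric.clear()
    return results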
official/vision/beta/projects/yt8m/train.py (+2 −41)

@@ -15,54 +15,15 @@
 """YT8M model training driver."""

 from absl import app
-from absl import flags
-import gin
-
-from official.common import distribute_utils
 from official.common import flags as tfm_flags
-from official.core import task_factory
-from official.core import train_lib
-from official.core import train_utils
-from official.modeling import performance
+from official.vision.beta import train
 # pylint: disable=unused-import
 from official.vision.beta.projects.yt8m.configs import yt8m
 from official.vision.beta.projects.yt8m.tasks import yt8m_task
 # pylint: enable=unused-import

-FLAGS = flags.FLAGS
-
-
-def main(_):
-  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
-  params = train_utils.parse_configuration(FLAGS)
-  model_dir = FLAGS.model_dir
-  if 'train' in FLAGS.mode:
-    # Pure eval modes do not output yaml files. Otherwise continuous eval job
-    # may race against the train job for writing the same file.
-    train_utils.serialize_config(params, model_dir)
-
-  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
-  # can have significant impact on model speeds by utilizing float16 in case of
-  # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
-  # dtype is float16.
-  if params.runtime.mixed_precision_dtype:
-    performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
-
-  distribution_strategy = distribute_utils.get_distribution_strategy(
-      distribution_strategy=params.runtime.distribution_strategy,
-      all_reduce_alg=params.runtime.all_reduce_alg,
-      num_gpus=params.runtime.num_gpus,
-      tpu_address=params.runtime.tpu)
-  with distribution_strategy.scope():
-    task = task_factory.get_task(params.task, logging_dir=model_dir)
-
-  train_lib.run_experiment(
-      distribution_strategy=distribution_strategy,
-      task=task,
-      mode=FLAGS.mode,
-      params=params,
-      model_dir=model_dir)
-

 if __name__ == '__main__':
   tfm_flags.define_flags()
-  app.run(main)
+  app.run(train.main)
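After this change the YT8M driver keeps only its project-specific imports, which are there purely for registration side effects, and delegates the whole training loop (config parsing, mixed precision, distribution strategy, experiment run) to the shared official.vision.beta.train.main. A driver for another project would follow the same shape; a sketch, with myproject as a hypothetical package:

"""Hypothetical training driver following the same delegation pattern."""

from absl import app

from official.common import flags as tfm_flags
from official.vision.beta import train
# pylint: disable=unused-import
# Imported for side effects only: these register the project's configs and
# tasks with the shared factories before train.main reads the flags.
from myproject.configs import my_config
from myproject.tasks import my_task
# pylint: enable=unused-import

if __name__ == '__main__':
  tfm_flags.define_flags()
  app.run(train.main)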