dcuai / dlexamples > Commits

Commit ee3997b3, authored Apr 15, 2022 by qianyj

    new tf branch for dtk21.10.1

Parent: 2795dc1f
Changes: 383 files (20 shown below, with 2305 additions and 0 deletions; +2305 -0)
TensorFlow2x/ComputeVision/Classification/models-master/official/core/train_utils.py  +478 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/core/train_utils_test.py  +98 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/__init__.py  +14 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/__init__.py  +21 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/gelu.py  +32 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/gelu_test.py  +34 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/relu.py  +31 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/relu_test.py  +35 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/sigmoid.py  +31 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/sigmoid_test.py  +40 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/swish.py  +72 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/swish_test.py  +44 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide.py  +186 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide_test.py  +101 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/policies.py  +178 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train.py  +69 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train_lib.py  +126 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train_lib_test.py  +183 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/trainer.py  +294 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/trainer_test.py  +238 -0
TensorFlow2x/ComputeVision/Classification/models-master/official/core/train_utils.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Training utils."""
import copy
import json
import os
import pprint
from typing import Any, Callable, Dict, List, Optional, Union

from absl import logging
import dataclasses
import gin
import orbit
import tensorflow as tf

# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
# pylint: enable=g-direct-tensorflow-import
from official.core import base_task
from official.core import base_trainer
from official.core import config_definitions
from official.core import exp_factory
from official.modeling import hyperparams


def get_leaf_nested_dict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
  """Get leaf from a dictionary with arbitrary depth with a list of keys.

  Args:
    d: The dictionary to extract value from.
    keys: The list of keys to extract values recursively.

  Returns:
    The value of the leaf.

  Raises:
    KeyError: If the value of keys extracted is a dictionary.
  """
  leaf = d
  for k in keys:
    if not isinstance(leaf, dict) or k not in leaf:
      raise KeyError(
          'Path not exist while traversing the dictionary: d with keys'
          ': %s.' % keys)
    leaf = leaf[k]

  if isinstance(leaf, dict):
    raise KeyError('The value extracted with keys: %s is not a leaf of the '
                   'dictionary: %s.' % (keys, d))
  return leaf


def cast_leaf_nested_dict(d: Dict[str, Any],
                          cast_fn: Callable[[Any], Any]) -> Dict[str, Any]:
  """Cast the leaves of a dictionary with arbitrary depth in place.

  Args:
    d: The dictionary to extract value from.
    cast_fn: The casting function.

  Returns:
    A dictionary with the same structure as d.
  """
  for key, value in d.items():
    if isinstance(value, dict):
      d[key] = cast_leaf_nested_dict(value, cast_fn)
    else:
      d[key] = cast_fn(value)
  return d


def maybe_create_best_ckpt_exporter(
    params: config_definitions.ExperimentConfig, data_dir: str) -> Any:
  """Maybe create a BestCheckpointExporter object, according to the config."""
  export_subdir = params.trainer.best_checkpoint_export_subdir
  metric_name = params.trainer.best_checkpoint_eval_metric
  metric_comp = params.trainer.best_checkpoint_metric_comp
  if data_dir and export_subdir and metric_name:
    best_ckpt_dir = os.path.join(data_dir, export_subdir)
    best_ckpt_exporter = BestCheckpointExporter(best_ckpt_dir, metric_name,
                                                metric_comp)
    logging.info(
        'Created the best checkpoint exporter. '
        'data_dir: %s, export_subdir: %s, metric_name: %s', data_dir,
        export_subdir, metric_name)
  else:
    best_ckpt_exporter = None

  return best_ckpt_exporter


# TODO(b/180147589): Add tests for this module.
class BestCheckpointExporter:
  """Keeps track of the best result, and saves its checkpoint.

  Orbit will support an API for checkpoint exporter. This class will be used
  together with orbit once this functionality is ready.
  """

  def __init__(self, export_dir: str, metric_name: str, metric_comp: str):
    """Initialization.

    Args:
      export_dir: The directory that will contain exported checkpoints.
      metric_name: Indicates which metric to look at, when determining which
        result is better. If eval_logs being passed to maybe_export_checkpoint
        is a nested dictionary, use `|` as a separator for different layers.
      metric_comp: Indicates how to compare results. Either `lower` or `higher`.
    """
    self._export_dir = export_dir
    self._metric_name = metric_name.split('|')
    self._metric_comp = metric_comp
    if self._metric_comp not in ('lower', 'higher'):
      raise ValueError('best checkpoint metric comp must be one of '
                       'higher, lower. Got: {}'.format(self._metric_comp))
    tf.io.gfile.makedirs(os.path.dirname(self.best_ckpt_logs_path))
    self._best_ckpt_logs = self._maybe_load_best_eval_metric()
    self._checkpoint_manager = None

  def _get_checkpoint_manager(self, checkpoint):
    """Gets an existing checkpoint manager or creates a new one."""
    if self._checkpoint_manager is None or (self._checkpoint_manager.checkpoint
                                            != checkpoint):
      logging.info('Creates a new checkpoint manager.')
      self._checkpoint_manager = tf.train.CheckpointManager(
          checkpoint,
          directory=self._export_dir,
          max_to_keep=1,
          checkpoint_name='best_ckpt')
    return self._checkpoint_manager

  def maybe_export_checkpoint(self, checkpoint, eval_logs, global_step,
                              write_logs=True) -> bool:
    """Compare eval_logs with past eval_logs and export checkpoint if better."""
    logging.info('[BestCheckpointExporter] received eval_logs: %s, at step: %d',
                 eval_logs, global_step)
    if self._best_ckpt_logs is None or self._new_metric_is_better(
        self._best_ckpt_logs, eval_logs):
      self._best_ckpt_logs = eval_logs
      if write_logs:
        self.export_best_eval_metric(self._best_ckpt_logs, global_step)
      self._get_checkpoint_manager(checkpoint).save()
      return True
    return False

  def _maybe_load_best_eval_metric(self):
    if not tf.io.gfile.exists(self.best_ckpt_logs_path):
      return None
    with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'r') as reader:
      return json.loads(reader.read())

  def _new_metric_is_better(self, old_logs, new_logs):
    """Check if the metric in new_logs is better than the metric in old_logs."""
    old_value = float(
        orbit.utils.get_value(
            get_leaf_nested_dict(old_logs, self._metric_name)))
    new_value = float(
        orbit.utils.get_value(
            get_leaf_nested_dict(new_logs, self._metric_name)))

    logging.info('[BestCheckpointExporter] comparing results. old: %f, new: %f',
                 old_value, new_value)
    if self._metric_comp == 'higher':
      if new_value > old_value:
        logging.info('[BestCheckpointExporter] '
                     'the new number is better since it is higher.')
        return True
    else:  # self._metric_comp == 'lower':
      if new_value < old_value:
        logging.info('[BestCheckpointExporter] '
                     'the new number is better since it is lower.')
        return True
    return False

  def export_best_eval_metric(self, eval_logs, global_step):
    """Export evaluation results of the best checkpoint into a json file."""
    eval_logs_ext = copy.copy(eval_logs)
    eval_logs_ext['best_ckpt_global_step'] = global_step
    eval_logs_ext = cast_leaf_nested_dict(
        eval_logs_ext, lambda x: float(orbit.utils.get_value(x)))
    # Saving json file is very fast.
    with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'w') as writer:
      writer.write(json.dumps(eval_logs_ext, indent=4) + '\n')

  @property
  def best_ckpt_logs(self):
    return self._best_ckpt_logs

  @property
  def best_ckpt_logs_path(self):
    return os.path.join(self._export_dir, 'info.json')

  @property
  def best_ckpt_path(self):
    """Returns the best ckpt path or None if there is no ckpt yet."""
    return tf.train.latest_checkpoint(self._export_dir)


@gin.configurable
def create_trainer(params: config_definitions.ExperimentConfig,
                   task: base_task.Task,
                   train: bool,
                   evaluate: bool,
                   checkpoint_exporter: Optional[BestCheckpointExporter] = None,
                   trainer_cls=base_trainer.Trainer) -> base_trainer.Trainer:
  """Create trainer."""
  logging.info('Running default trainer.')
  model = task.build_model()
  optimizer = task.create_optimizer(params.trainer.optimizer_config,
                                    params.runtime)
  return trainer_cls(
      params,
      task,
      model=model,
      optimizer=optimizer,
      train=train,
      evaluate=evaluate,
      checkpoint_exporter=checkpoint_exporter)


@dataclasses.dataclass
class ParseConfigOptions:
  """Use this dataclass instead of FLAGS to customize parse_configuration()."""
  experiment: str
  config_file: List[str]
  tpu: str = ''
  tf_data_service: str = ''
  params_override: str = ''

  def __contains__(self, name):
    return name in dataclasses.asdict(self)


def parse_configuration(flags_obj, lock_return=True, print_return=True):
  """Parses ExperimentConfig from flags."""

  if flags_obj.experiment is None:
    raise ValueError('The flag --experiment must be specified.')

  # 1. Get the default config from the registered experiment.
  params = exp_factory.get_exp_config(flags_obj.experiment)

  # 2. Get the first level of override from `--config_file`.
  #    `--config_file` is typically used as a template that specifies the
  #    common override for a particular experiment.
  for config_file in flags_obj.config_file or []:
    params = hyperparams.override_params_dict(
        params, config_file, is_strict=True)

  # 3. Override the TPU address and tf.data service address.
  params.override({
      'runtime': {
          'tpu': flags_obj.tpu,
      },
  })
  if ('tf_data_service' in flags_obj and flags_obj.tf_data_service and
      isinstance(params.task, config_definitions.TaskConfig)):
    params.override({
        'task': {
            'train_data': {
                'tf_data_service_address': flags_obj.tf_data_service,
            },
            'validation_data': {
                'tf_data_service_address': flags_obj.tf_data_service,
            }
        }
    })

  # 4. Get the second level of override from `--params_override`.
  #    `--params_override` is typically used as a further override over the
  #    template. For example, one may define a particular template for training
  #    ResNet50 on ImageNet in a config file and pass it via `--config_file`,
  #    then define different learning rates and pass it via `--params_override`.
  if flags_obj.params_override:
    params = hyperparams.override_params_dict(
        params, flags_obj.params_override, is_strict=True)

  params.validate()
  if lock_return:
    params.lock()

  if print_return:
    pp = pprint.PrettyPrinter()
    logging.info('Final experiment parameters:\n%s',
                 pp.pformat(params.as_dict()))

  return params


def serialize_config(params: config_definitions.ExperimentConfig,
                     model_dir: str):
  """Serializes and saves the experiment config."""
  if model_dir is None:
    raise ValueError('model_dir must be specified, but got None')
  params_save_path = os.path.join(model_dir, 'params.yaml')
  logging.info('Saving experiment configuration to %s', params_save_path)
  tf.io.gfile.makedirs(model_dir)
  hyperparams.save_params_dict_to_yaml(params, params_save_path)


def save_gin_config(filename_suffix: str, model_dir: str):
  """Serializes and saves the experiment config."""
  gin_save_path = os.path.join(
      model_dir, 'operative_config.{}.gin'.format(filename_suffix))
  logging.info('Saving gin configurations to %s', gin_save_path)
  tf.io.gfile.makedirs(model_dir)
  with tf.io.gfile.GFile(gin_save_path, 'w') as f:
    f.write(gin.operative_config_str())


def read_global_step_from_checkpoint(ckpt_file_path):
  """Read global step from checkpoint, or get global step from its filename."""
  global_step = tf.Variable(-1, dtype=tf.int64)
  ckpt = tf.train.Checkpoint(global_step=global_step)
  try:
    ckpt.restore(ckpt_file_path).expect_partial()
    global_step_maybe_restored = global_step.numpy()
  except tf.errors.InvalidArgumentError:
    global_step_maybe_restored = -1

  if global_step_maybe_restored == -1:
    raise ValueError('global_step not found in checkpoint {}. '
                     'If you want to run finetune eval jobs, you need to '
                     'make sure that your pretrain model writes '
                     'global_step in its checkpoints.'.format(ckpt_file_path))
  global_step_restored = global_step.numpy()
  logging.info('get global_step %d from checkpoint %s', global_step_restored,
               ckpt_file_path)
  return global_step_restored


def write_json_summary(log_dir, global_step, eval_metrics):
  """Dump evaluation metrics to json file."""
  serializable_dict = {}
  for name, value in eval_metrics.items():
    if hasattr(value, 'numpy'):
      serializable_dict[name] = str(value.numpy())
    else:
      serializable_dict[name] = str(value)
  output_json = os.path.join(log_dir, 'metrics-{}.json'.format(global_step))
  logging.info('Evaluation results at pretrain step %d: %s', global_step,
               serializable_dict)
  with tf.io.gfile.GFile(output_json, 'w') as writer:
    writer.write(json.dumps(serializable_dict, indent=4) + '\n')


def write_summary(summary_writer, global_step, eval_metrics):
  """Write evaluation metrics to TF summary."""
  numeric_dict = {}
  for name, value in eval_metrics.items():
    numeric_dict[name] = float(orbit.utils.get_value(value))
  with summary_writer.as_default():
    for name, value in numeric_dict.items():
      tf.summary.scalar(name, value, step=global_step)
    summary_writer.flush()


def remove_ckpts(model_dir):
  """Remove model checkpoints, so we can restart."""
  ckpts = os.path.join(model_dir, 'ckpt-*')
  logging.info('removing checkpoint files %s', ckpts)
  for file_to_remove in tf.io.gfile.glob(ckpts):
    tf.io.gfile.rmtree(file_to_remove)

  file_to_remove = os.path.join(model_dir, 'checkpoint')
  if tf.io.gfile.exists(file_to_remove):
    tf.io.gfile.remove(file_to_remove)


def write_model_params(model: Union[tf.Module, tf.keras.Model],
                       output_path: str) -> None:
  """Writes the model parameters and shapes to a file.

  Args:
    model: A model instance.
    output_path: Output file path.
  """
  with tf.io.gfile.GFile(output_path, 'w') as f:
    total_params = 0
    for var in model.variables:
      shape = tf.shape(var)
      total_params += tf.math.reduce_prod(shape).numpy()
      f.write(f'{var.name} {shape.numpy().tolist()}\n')
    f.write(f'\nTotal params: {total_params}\n')


def try_count_params(model: Union[tf.Module, tf.keras.Model],
                     trainable_only: bool = False):
  """Count the number of parameters if model is possible.

  Args:
    model: Try to count the number of params in this model.
    trainable_only: Whether to calculate trainable params only. This flag is
      not used when the model has `count_params` attribute.

  Returns:
    The number of parameters or None.
  """
  if hasattr(model, 'count_params'):
    try:
      return model.count_params()
    except ValueError:
      logging.info('Number of trainable params unknown, because the build() '
                   'methods in keras layers were not called. This is probably '
                   'because the model was not fed any input, e.g., the max '
                   'train step was already reached before this run.')
      return None
  else:
    total_params = 0
    variables = model.trainable_variables if trainable_only else model.variables
    for var in variables:
      shape = tf.shape(var)
      total_params += tf.math.reduce_prod(shape).numpy()
    return total_params


def try_count_flops(model: Union[tf.Module, tf.keras.Model],
                    inputs_kwargs: Optional[Dict[str, Any]] = None,
                    output_path: Optional[str] = None):
  """Counts and returns model FLOPs.

  Args:
    model: A model instance.
    inputs_kwargs: An optional dictionary of argument pairs specifying inputs'
      shape specifications to getting corresponding concrete function.
    output_path: A file path to write the profiling results to.

  Returns:
    The model's FLOPs.
  """
  if hasattr(model, 'inputs'):
    try:
      # Get input shape and set batch size to 1.
      if model.inputs:
        inputs = [
            tf.TensorSpec([1] + input.shape[1:], input.dtype)
            for input in model.inputs
        ]
        concrete_func = tf.function(model).get_concrete_function(inputs)
      # If model.inputs is invalid, try to use the input to get concrete
      # function for model.call (subclass model).
      else:
        concrete_func = tf.function(model.call).get_concrete_function(
            **inputs_kwargs)
      frozen_func, _ = convert_variables_to_constants_v2_as_graph(concrete_func)

      # Calculate FLOPs.
      run_meta = tf.compat.v1.RunMetadata()
      opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
      if output_path is not None:
        opts['output'] = f'file:outfile={output_path}'
      else:
        opts['output'] = 'none'
      flops = tf.compat.v1.profiler.profile(
          graph=frozen_func.graph, run_meta=run_meta, options=opts)
      return flops.total_float_ops
    except Exception as e:  # pylint: disable=broad-except
      logging.info(
          'Failed to count model FLOPs with error %s, because the build() '
          'methods in keras layers were not called. This is probably because '
          'the model was not fed any input, e.g., the max train step was '
          'already reached before this run.', e)
      return None
  return None
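
A minimal usage sketch of the BestCheckpointExporter defined above (illustrative only, not part of this commit; the export directory and metric names are hypothetical):

import tensorflow as tf
from official.core import train_utils

# 'validation|accuracy' selects eval_logs['validation']['accuracy'];
# `|` separates the levels of a nested eval_logs dictionary.
exporter = train_utils.BestCheckpointExporter(
    export_dir='/tmp/best_ckpt',        # hypothetical directory
    metric_name='validation|accuracy',
    metric_comp='higher')

ckpt = tf.train.Checkpoint(step=tf.Variable(0, dtype=tf.int64))
eval_logs = {'validation': {'accuracy': 0.91}}
# Saves under export_dir (and updates info.json) only if 0.91 beats the
# previously recorded best value.
exporter.maybe_export_checkpoint(ckpt, eval_logs, global_step=1000)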
TensorFlow2x/ComputeVision/Classification/models-master/official/core/train_utils_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for official.core.train_utils."""
import os

import numpy as np
import tensorflow as tf

from official.core import test_utils
from official.core import train_utils


class TrainUtilsTest(tf.test.TestCase):

  def test_get_leaf_nested_dict(self):
    d = {'a': {'i': {'x': 5}}}
    self.assertEqual(train_utils.get_leaf_nested_dict(d, ['a', 'i', 'x']), 5)

  def test_get_leaf_nested_dict_not_leaf(self):
    with self.assertRaisesRegex(KeyError, 'The value extracted with keys.*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i'])

  def test_get_leaf_nested_dict_path_not_exist_missing_key(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'y'])

  def test_get_leaf_nested_dict_path_not_exist_out_of_range(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': {'x': 5}}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])

  def test_get_leaf_nested_dict_path_not_exist_meets_leaf(self):
    with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
      d = {'a': {'i': 5}}
      train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])

  def test_cast_leaf_nested_dict(self):
    d = {'a': {'i': {'x': '123'}}, 'b': 456.5}
    d = train_utils.cast_leaf_nested_dict(d, int)
    self.assertEqual(d['a']['i']['x'], 123)
    self.assertEqual(d['b'], 456)

  def test_write_model_params_keras_model(self):
    inputs = np.zeros([2, 3])
    model = test_utils.FakeKerasModel()
    model(inputs)  # Must do forward pass to build the model.

    filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
    train_utils.write_model_params(model, filepath)
    actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()

    expected = [
        'fake_keras_model/dense/kernel:0 [3, 4]',
        'fake_keras_model/dense/bias:0 [4]',
        'fake_keras_model/dense_1/kernel:0 [4, 4]',
        'fake_keras_model/dense_1/bias:0 [4]',
        '',
        'Total params: 36',
    ]
    self.assertEqual(actual, expected)

  def test_write_model_params_module(self):
    inputs = np.zeros([2, 3], dtype=np.float32)
    model = test_utils.FakeModule(3, name='fake_module')
    model(inputs)  # Must do forward pass to build the model.

    filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
    train_utils.write_model_params(model, filepath)
    actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()

    expected = [
        'fake_module/dense/b:0 [4]',
        'fake_module/dense/w:0 [3, 4]',
        'fake_module/dense_1/b:0 [4]',
        'fake_module/dense_1/w:0 [4, 4]',
        '',
        'Total params: 36',
    ]
    self.assertEqual(actual, expected)


if __name__ == '__main__':
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/__init__.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/__init__.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Activations package definition."""
from official.modeling.activations.gelu import gelu
from official.modeling.activations.relu import relu6
from official.modeling.activations.sigmoid import hard_sigmoid
from official.modeling.activations.swish import hard_swish
from official.modeling.activations.swish import identity
from official.modeling.activations.swish import simple_swish
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/gelu.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Gaussian error linear unit."""

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  return tf.keras.activations.gelu(x, approximate=True)
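
A quick check of the function above (illustrative, not part of this commit): `gelu` uses the tanh approximation, so it deviates slightly from the exact erf-based GELU:

import tensorflow as tf
from official.modeling import activations

x = tf.constant([[0.25, 0.0, -0.25], [-1.0, -2.0, 3.0]])
approx = activations.gelu(x)                             # approximate=True
exact = tf.keras.activations.gelu(x, approximate=False)  # erf-based form
# The difference is small but nonzero.
print(tf.reduce_max(tf.abs(approx - exact)).numpy())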
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/gelu_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the Gaussian error linear unit."""

import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class GeluTest(keras_parameterized.TestCase):

  def test_gelu(self):
    expected_data = [[0.14967535, 0., -0.10032465],
                     [-0.15880796, -0.04540223, 2.9963627]]
    gelu_data = activations.gelu([[.25, 0, -.25], [-1, -2, 3]])
    self.assertAllClose(expected_data, gelu_data)


if __name__ == '__main__':
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/relu.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Customized Relu activation."""

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def relu6(features):
  """Computes the Relu6 activation function.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return tf.nn.relu6(features)
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/relu_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the customized Relu activation."""

import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class CustomizedReluTest(keras_parameterized.TestCase):

  def test_relu6(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_relu6_data = activations.relu6(features)
    relu6_data = tf.nn.relu6(features)
    self.assertAllClose(customized_relu6_data, relu6_data)


if __name__ == '__main__':
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/sigmoid.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Customized Sigmoid activation."""

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def hard_sigmoid(features):
  """Computes the hard sigmoid activation function.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return tf.nn.relu6(features + tf.cast(3., features.dtype)) * 0.16667
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/sigmoid_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the customized Sigmoid activation."""

import numpy as np
import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class CustomizedSigmoidTest(keras_parameterized.TestCase):

  def _hard_sigmoid_nn(self, x):
    x = np.float32(x)
    return tf.nn.relu6(x + 3.) * 0.16667

  def test_hard_sigmoid(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_hard_sigmoid_data = activations.hard_sigmoid(features)
    sigmoid_data = self._hard_sigmoid_nn(features)
    self.assertAllClose(customized_hard_sigmoid_data, sigmoid_data)


if __name__ == '__main__':
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/swish.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Customized Swish activation."""

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
  """Computes the Swish activation function.

  The tf.nn.swish operation uses a custom gradient to reduce memory usage.
  Since saving custom gradients in SavedModel is currently not supported, and
  one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper so that one can select between the native TensorFlow
  swish operation and a customized operation that uses the default TensorFlow
  gradient computation.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return features * tf.nn.sigmoid(features)


@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
  """Computes a hard version of the swish function.

  This operation can be used to reduce computational cost and improve
  quantization for edge devices.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  fdtype = features.dtype
  return features * tf.nn.relu6(features + tf.cast(3., fdtype)) * (1. / 6.)


@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
  """Computes the identity function.

  Useful for helping in quantization.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return tf.identity(features)
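
An illustrative comparison (not part of this commit): hard_swish is the piecewise-linear surrogate x * relu6(x + 3) / 6 for simple_swish's x * sigmoid(x), which is what makes it cheaper to compute and friendlier to quantization:

import tensorflow as tf
from official.modeling import activations

x = tf.linspace(-4.0, 4.0, 9)
print(activations.simple_swish(x).numpy())  # x * sigmoid(x)
print(activations.hard_swish(x).numpy())    # close to the above, piecewise linear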
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/activations/swish_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the customized Swish activation."""

import numpy as np
import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class CustomizedSwishTest(keras_parameterized.TestCase):

  def _hard_swish_np(self, x):
    x = np.float32(x)
    return x * np.clip(x + 3, 0, 6) / 6

  def test_simple_swish(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_swish_data = activations.simple_swish(features)
    swish_data = tf.nn.swish(features)
    self.assertAllClose(customized_swish_data, swish_data)

  def test_hard_swish(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_swish_data = activations.hard_swish(features)
    swish_data = self._hard_swish_np(features)
    self.assertAllClose(customized_swish_data, swish_data)


if __name__ == '__main__':
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Stacking model horizontally."""

from absl import logging
import numpy as np
import tensorflow as tf


def expand_vector(v: np.ndarray) -> np.ndarray:
  """Expands a vector with batch dimensions.

  Equivalent to expand_1_axis(v, epsilon=0.0, axis=-1)

  Args:
    v: A vector with shape [..., a].

  Returns:
    A vector with shape [..., 2 * a].
  """
  return np.repeat(v, 2, axis=-1)


def expand_1_axis(w: np.ndarray, epsilon: float, axis: int) -> np.ndarray:
  """Expands either the first dimension or the last dimension of w.

  If `axis = 0`, the following constraint will be satisfied:
    matmul(x, w) ==
        matmul(expand_vector(x), expand_1_axis(w, epsilon=0.1, axis=0))

  If `axis = -1`, the following constraint will be satisfied if `epsilon = 0.0`:
    expand_vector(matmul(x, w)) ==
        2 * matmul(x, expand_1_axis(w, epsilon=0.0, axis=-1))

  Args:
    w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
    epsilon: Symmetric noise added to the expanded tensor.
    axis: Must be either 0 or -1.

  Returns:
    Expanded numpy array.
  """
  assert axis in (0, -1), (
      "Only support expanding the first or the last dimension. "
      "Got: {}".format(axis))

  rank = len(w.shape)

  d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
  d_w = np.repeat(d_w, 2, axis=axis)

  sign_flip = np.array([1, -1])
  for _ in range(rank - 1):
    sign_flip = np.expand_dims(sign_flip, axis=-1 if axis == 0 else 0)
  sign_flip = np.tile(sign_flip,
                      [w.shape[0]] + [1] * (rank - 2) + [w.shape[-1]])

  d_w *= sign_flip
  w_expand = (np.repeat(w, 2, axis=axis) + d_w) / 2
  return w_expand


def expand_2_axes(w: np.ndarray, epsilon: float) -> np.ndarray:
  """Expands the first dimension and the last dimension of w.

  The following constraint will be satisfied:
    expand_vector(matmul(x, w)) == matmul(expand_vector(x), expand_2_axes(w))

  Args:
    w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
    epsilon: Symmetric noise added to the expanded tensor.

  Returns:
    Expanded numpy array.
  """
  rank = len(w.shape)

  d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
  d_w = np.repeat(np.repeat(d_w, 2, axis=0), 2, axis=-1)

  sign_flip = np.array([1, -1])
  for _ in range(rank - 1):
    sign_flip = np.expand_dims(sign_flip, axis=-1)
  sign_flip = np.tile(sign_flip,
                      [w.shape[0]] + [1] * (rank - 2) + [w.shape[-1] * 2])

  d_w *= sign_flip
  w_expand = (np.repeat(np.repeat(w, 2, axis=0), 2, axis=-1) + d_w) / 2
  return w_expand


def var_to_var(var_from: tf.Variable, var_to: tf.Variable, epsilon: float):
  """Expands a variable to another variable.

  Assume the shape of `var_from` is (a, b, ..., y, z), the shape of `var_to`
  can be (a, ..., z * 2), (a * 2, ..., z * 2), (a * 2, ..., z).

  If the shape of `var_to` is (a, ..., 2 * z):
    For any x, tf.matmul(x, var_to) ~= expand_vector(tf.matmul(x, var_from)) / 2
    Note that noise will be added to the left-hand side if epsilon != 0.

  If the shape of `var_to` is (2 * a, ..., z):
    For any x, tf.matmul(expand_vector(x), var_to) == tf.matmul(x, var_from)

  If the shape of `var_to` is (2 * a, ..., 2 * z):
    For any x, tf.matmul(expand_vector(x), var_to) ==
        expand_vector(tf.matmul(expand_vector(x), var_from))

  Args:
    var_from: input variable to expand.
    var_to: output variable.
    epsilon: the noise ratio that will be added, when splitting `var_from`.
  """
  shape_from = var_from.shape
  shape_to = var_to.shape

  if shape_from == shape_to:
    var_to.assign(var_from)

  elif len(shape_from) == 1 and len(shape_to) == 1:
    var_to.assign(expand_vector(var_from.numpy()))

  elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] == shape_to[-1]:
    var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=0))

  elif shape_from[0] == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
    var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=-1))

  elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
    var_to.assign(expand_2_axes(var_from.numpy(), epsilon=epsilon))

  else:
    raise ValueError("Shape not supported, {}, {}".format(shape_from, shape_to))


def model_to_model_2x_wide(model_from: tf.Module,
                           model_to: tf.Module,
                           epsilon: float = 0.1):
  """Expands a model to a wider version.

  Also makes sure that the output of the model is not changed after expanding.
  For example:
  ```
  model_narrow = tf.keras.Sequential()
  model_narrow.add(tf.keras.Input(shape=(3,)))
  model_narrow.add(tf.keras.layers.Dense(4))
  model_narrow.add(tf.keras.layers.Dense(1))

  model_wide = tf.keras.Sequential()
  model_wide.add(tf.keras.Input(shape=(6,)))
  model_wide.add(tf.keras.layers.Dense(8))
  model_wide.add(tf.keras.layers.Dense(1))

  model_to_model_2x_wide(model_narrow, model_wide)
  assert model_narrow([[1, 2, 3]]) == model_wide([[1, 1, 2, 2, 3, 3]])
  ```

  We assume that `model_from` and `model_to` have the same architecture and
  that only their widths differ.

  Args:
    model_from: input model to expand.
    model_to: output model whose variables will be assigned expanded values
      according to `model_from`.
    epsilon: the noise ratio that will be added, when splitting `var_from`.
  """
  for w_from, w_to in zip(model_from.trainable_variables,
                          model_to.trainable_variables):
    logging.info("expanding %s %s to %s %s",
                 w_from.name, w_from.shape, w_to.name, w_to.shape)
    var_to_var(w_from, w_to, epsilon=epsilon)
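
A numeric check (illustrative, not part of this commit) of the identities stated in the docstrings above:

import numpy as np
from official.modeling.fast_training.experimental import tf2_utils_2x_wide

x = np.array([1.0, 2.0, 3.0])
w = np.random.rand(3, 4)

# axis=0: doubling the input dimension preserves the product exactly, even
# with nonzero epsilon, because the paired noise terms cancel.
w0 = tf2_utils_2x_wide.expand_1_axis(w, epsilon=0.1, axis=0)
assert np.allclose(np.matmul(x, w),
                   np.matmul(tf2_utils_2x_wide.expand_vector(x), w0))

# axis=-1 with epsilon=0: every output coordinate is duplicated and halved.
w1 = tf2_utils_2x_wide.expand_1_axis(w, epsilon=0.0, axis=-1)
assert np.allclose(tf2_utils_2x_wide.expand_vector(np.matmul(x, w)),
                   2.0 * np.matmul(x, w1))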
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide_test.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for tf2_utils_2x_wide."""

import numpy as np
import tensorflow as tf

from official.modeling.fast_training.experimental import tf2_utils_2x_wide


class Tf2Utils2XWideTest(tf.test.TestCase):

  def test_expand_vector(self):
    x = np.array([1, 2])
    self.assertAllClose(tf2_utils_2x_wide.expand_vector(x),
                        np.array([1, 1, 2, 2]))

  def test_expand_matrix(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_2_axes(x, epsilon=0.1)
    self.assertAllClose(x[0, :] + x[1, :], np.array([1, 1, 2, 2]))
    self.assertAllClose(x[2, :] + x[3, :], np.array([3, 3, 4, 4]))

  def test_expand_matrix_axis_0(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_1_axis(x, axis=0, epsilon=0.1)
    self.assertAllClose(x[0, :] + x[1, :], np.array([1, 2]))
    self.assertAllClose(x[2, :] + x[3, :], np.array([3, 4]))

  def test_expand_matrix_axis_1(self):
    x = np.array([[1, 2], [3, 4]])
    x = tf2_utils_2x_wide.expand_1_axis(x, axis=-1, epsilon=0.1)
    self.assertAllClose(x[:, 0] + x[:, 1], np.array([1, 3]))
    self.assertAllClose(x[:, 2] + x[:, 3], np.array([2, 4]))

  def test_expand_3d_tensor(self):
    x0 = np.array([10, 11])
    x1 = np.array([10, 10, 11, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_2_axes(w0, epsilon=0.1)
    o0 = np.matmul(x0, w0)
    o1 = np.matmul(x1, w1)
    self.assertAllClose(np.repeat(o0, 2, axis=-1), o1)

  def test_expand_3d_tensor_axis_0(self):
    x0 = np.array([10, 11])
    x1 = np.array([10, 10, 11, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=0, epsilon=0.1)
    o0 = np.matmul(x0, w0)
    o1 = np.matmul(x1, w1)
    self.assertAllClose(o0, o1)

  def test_expand_3d_tensor_axis_2(self):
    x = np.array([10, 11])
    w0 = np.random.rand(2, 2)
    w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=-1, epsilon=0.1)
    o0 = np.matmul(x, w0)
    o1 = np.matmul(x, w1)
    self.assertAllClose(o0, np.sum(o1.reshape(2, 2), axis=-1))

  def test_end_to_end(self):
    """Covers expand_vector, expand_2_axes, and expand_1_axis."""
    model_narrow = tf.keras.Sequential()
    model_narrow.add(tf.keras.Input(shape=(3,)))
    model_narrow.add(tf.keras.layers.Dense(4))
    model_narrow.add(tf.keras.layers.Dense(4))
    model_narrow.add(tf.keras.layers.Dense(1))

    model_wide = tf.keras.Sequential()
    model_wide.add(tf.keras.Input(shape=(6,)))
    model_wide.add(tf.keras.layers.Dense(8))
    model_wide.add(tf.keras.layers.Dense(8))
    model_wide.add(tf.keras.layers.Dense(1))

    x0 = np.array([[1, 2, 3]])
    x1 = np.array([[1, 1, 2, 2, 3, 3]])
    # Call model once to build variables first.
    _, _ = model_narrow(x0), model_wide(x1)

    tf2_utils_2x_wide.model_to_model_2x_wide(
        model_narrow, model_wide, epsilon=0.2)
    self.assertAllClose(model_narrow(x0), model_wide(x1),
                        rtol=1e-05, atol=1e-05)


if __name__ == "__main__":
  tf.test.main()
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/policies.py  (new file, 0 → 100644)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base ProgressivePolicy definition for progressive training.

To write a progressive model, subclass ProgressivePolicy and implement its
abstract methods to handle each training stage.
"""

import abc
import dataclasses
from typing import Any, Mapping
from absl import logging
import six
import tensorflow as tf

from official.common import streamz_counters
from official.modeling.fast_training.progressive import utils
from official.modeling.hyperparams import base_config


@dataclasses.dataclass
class ProgressiveConfig(base_config.Config):
  pass


@six.add_metaclass(abc.ABCMeta)
class ProgressivePolicy:
  """The APIs for handling progressive training stages.

  Attributes:
    cur_model: The model for the current progressive training stage.
    cur_train_dataset: The train dataset function for the current stage.
    cur_eval_dataset: The eval dataset function for the current stage.
    cur_optimizer: The optimizer for the current stage.
    cur_checkpoint_items: Items to be saved in and restored from checkpoints,
      for the progressive trainer.
    is_last_stage: Whether it is currently in the last stage.

  Interfaces:
    is_stage_advancing: Returns if progressive training is advancing to the
      next stage.
    update_pt_stage: Update progressive training stage.
  """

  def __init__(self):
    """Initialize stage policy."""
    self._cur_train_dataset = None
    self._cur_eval_dataset = None
    self._volatiles = utils.VolatileTrackable(optimizer=None, model=None)

    stage_id = 0
    self._stage_id = tf.Variable(
        stage_id,
        trainable=False,
        dtype=tf.int64,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
        shape=[])
    self._volatiles.reassign_trackable(
        optimizer=self.get_optimizer(stage_id),
        model=self.get_model(stage_id, old_model=None))  # pytype: disable=wrong-arg-types  # typed-keras

    streamz_counters.progressive_policy_creation_counter.get_cell(
        ).increase_by(1)

  def compute_stage_id(self, global_step: int) -> int:
    for stage_id in range(self.num_stages()):
      global_step -= self.num_steps(stage_id)
      if global_step < 0:
        return stage_id
    logging.error('Global step %d found no matching progressive stages. '
                  'Default to the last stage.', global_step)
    return self.num_stages() - 1

  @abc.abstractmethod
  def num_stages(self) -> int:
    """Return the total number of progressive stages."""
    pass

  @abc.abstractmethod
  def num_steps(self, stage_id: int) -> int:
    """Return the total number of steps in this stage."""
    pass

  @abc.abstractmethod
  def get_model(self,
                stage_id: int,
                old_model: tf.keras.Model = None) -> tf.keras.Model:  # pytype: disable=annotation-type-mismatch  # typed-keras
    """Return model for this stage. For initialization, `old_model` = None."""
    pass

  @abc.abstractmethod
  def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer:
    """Return optimizer for this stage."""
    pass

  @abc.abstractmethod
  def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
    """Return training Dataset for this stage."""
    pass

  @abc.abstractmethod
  def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset:
    """Return evaluation Dataset for this stage."""
    pass

  @property
  def cur_model(self) -> tf.keras.Model:
    return self._volatiles.model

  @property
  def cur_train_dataset(self) -> tf.data.Dataset:
    if self._cur_train_dataset is None:
      self._cur_train_dataset = self.get_train_dataset(self._stage_id.numpy())
    return self._cur_train_dataset

  @property
  def cur_eval_dataset(self) -> tf.data.Dataset:
    if self._cur_eval_dataset is None:
      self._cur_eval_dataset = self.get_eval_dataset(self._stage_id.numpy())
    return self._cur_eval_dataset

  @property
  def cur_optimizer(self) -> tf.keras.optimizers.Optimizer:
    return self._volatiles.optimizer

  @property
  def is_last_stage(self) -> bool:
    stage_id = self._stage_id.numpy()
    return stage_id >= self.num_stages() - 1

  @property
  def cur_checkpoint_items(self) -> Mapping[str, Any]:
    return dict(stage_id=self._stage_id, volatiles=self._volatiles)

  def is_stage_advancing(self, global_step: int) -> bool:
    old_stage_id = self._stage_id.numpy()
    new_stage_id = self.compute_stage_id(global_step)
    return old_stage_id != new_stage_id

  def update_pt_stage(self, global_step: int, pass_old_model=True) -> None:
    """Update progressive training internal status.

    Call this after a training loop ends.

    Args:
      global_step: an integer scalar of the current global step.
      pass_old_model: whether to pass the old_model to get_model() function.
        This is set to False if the old_model is irrelevant (e.g., just a
        default model from stage 0).
    """
    old_stage_id = self._stage_id.numpy()
    new_stage_id = self.compute_stage_id(global_step)
    logging.info('Switching stage from %d to %d', old_stage_id, new_stage_id)

    # Update stage id.
    self._stage_id.assign(new_stage_id)
    # Update dataset function.
    self._cur_train_dataset = None
    self._cur_eval_dataset = None

    # Update optimizer and model.
    new_optimizer = self.get_optimizer(new_stage_id)
    self._volatiles.reassign_trackable(optimizer=new_optimizer)
    new_model = self.get_model(
        new_stage_id, old_model=self.cur_model if pass_old_model else None)
    self._volatiles.reassign_trackable(model=new_model)
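
A minimal subclass sketch (illustrative, not part of this commit) of the abstract interface above; build_model, make_optimizer, and make_dataset are hypothetical helpers:

from official.modeling.fast_training.progressive import policies


class TwoStagePolicy(policies.ProgressivePolicy):
  """Trains a narrow model in stage 0, then a wider one in stage 1."""

  def num_stages(self):
    return 2

  def num_steps(self, stage_id):
    return 1000 if stage_id == 0 else 9000

  def get_model(self, stage_id, old_model=None):
    model = build_model(width_multiplier=2 ** stage_id)  # hypothetical helper
    # A real policy would warm-start `model` from `old_model` here, e.g. with
    # tf2_utils_2x_wide.model_to_model_2x_wide(old_model, model).
    return model

  def get_optimizer(self, stage_id):
    return make_optimizer(stage_id)  # hypothetical helper

  def get_train_dataset(self, stage_id):
    return make_dataset('train', stage_id)  # hypothetical helper

  def get_eval_dataset(self, stage_id):
    return make_dataset('eval', stage_id)  # hypothetical helper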
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train.py
0 → 100644
View file @
ee3997b3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM binary for the progressive trainer."""
from absl import app
from absl import flags
import gin

from official.common import distribute_utils
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_utils
from official.modeling import performance
from official.modeling.fast_training.progressive import train_lib

FLAGS = flags.FLAGS


def main(_):
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
  params = train_utils.parse_configuration(FLAGS)
  model_dir = FLAGS.model_dir
  if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise continuous eval job
    # may race against the train job for writing the same file.
    train_utils.serialize_config(params, model_dir)

  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have significant impact on model speeds by utilizing float16 in case
  # of GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only
  # when dtype is float16.
  if params.runtime.mixed_precision_dtype:
    performance.set_mixed_precision_policy(
        params.runtime.mixed_precision_dtype)
  distribution_strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy=params.runtime.distribution_strategy,
      all_reduce_alg=params.runtime.all_reduce_alg,
      num_gpus=params.runtime.num_gpus,
      tpu_address=params.runtime.tpu,
      **params.runtime.model_parallelism())
  with distribution_strategy.scope():
    task = task_factory.get_task(params.task, logging_dir=model_dir)

  train_lib.run_experiment(
      distribution_strategy=distribution_strategy,
      task=task,
      mode=FLAGS.mode,
      params=params,
      model_dir=model_dir)

  train_utils.save_gin_config(FLAGS.mode, model_dir)


if __name__ == '__main__':
  tfm_flags.define_flags()
  app.run(main)
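
The mixed-precision block in main() maps params.runtime.mixed_precision_dtype onto a global Keras policy. A rough sketch, assuming the TFM helper wraps the stock Keras API (the real performance.set_mixed_precision_policy may also configure loss scaling):

import tensorflow as tf

# Rough equivalent of performance.set_mixed_precision_policy under the
# assumption above. 'mixed_float16' targets GPUs; 'mixed_bfloat16' targets
# TPUs; passing None or '' leaves the default float32 policy in place.
def set_policy(mixed_precision_dtype):
  if mixed_precision_dtype:
    tf.keras.mixed_precision.set_global_policy(mixed_precision_dtype)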
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train_lib.py
0 → 100644
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM progressive training driver library.
Compared to the common training driver, the only difference is that we use
prog_trainer_lib.ProgressiveTrainer instead of the base trainer.
"""
# pytype: disable=attribute-error
import os
from typing import Any, Mapping, Tuple

# Import libraries
from absl import logging
import orbit
import tensorflow as tf

from official.core import base_task
from official.core import config_definitions
from official.core import train_lib as base_train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib


def run_experiment(distribution_strategy: tf.distribute.Strategy,
                   task: base_task.Task,
                   mode: str,
                   params: config_definitions.ExperimentConfig,
                   model_dir: str,
                   run_post_eval: bool = False,
                   save_summary: bool = True
                   ) -> Tuple[tf.keras.Model, Mapping[str, Any]]:
  """Runs train/eval configured by the experiment params.

  Args:
    distribution_strategy: A distribution strategy.
    task: A Task instance.
    mode: A 'str', specifying the mode. Can be 'train', 'eval',
      'train_and_eval' or 'continuous_eval'.
    params: ExperimentConfig instance.
    model_dir: A 'str', a path to store model checkpoints and summaries.
    run_post_eval: Whether to run post eval once after training; metrics logs
      are returned.
    save_summary: Whether to save train and validation summaries.

  Returns:
    A 2-tuple of (model, eval_logs).
      model: `tf.keras.Model` instance.
      eval_logs: returns eval metrics logs when run_post_eval is set to True,
        otherwise, returns {}.
  """
  with distribution_strategy.scope():
    logging.info('Running progressive trainer.')
    trainer = prog_trainer_lib.ProgressiveTrainer(
        params, task, ckpt_dir=model_dir,
        train='train' in mode,
        evaluate=('eval' in mode) or run_post_eval,
        checkpoint_exporter=base_train_lib.maybe_create_best_ckpt_exporter(
            params, model_dir))

  if trainer.checkpoint:
    checkpoint_manager = tf.train.CheckpointManager(
        trainer.checkpoint,
        directory=model_dir,
        max_to_keep=params.trainer.max_to_keep,
        step_counter=trainer.global_step,
        checkpoint_interval=params.trainer.checkpoint_interval,
        init_fn=trainer.initialize)
  else:
    checkpoint_manager = None

  controller = orbit.Controller(
      strategy=distribution_strategy,
      trainer=trainer if 'train' in mode else None,
      evaluator=trainer,
      global_step=trainer.global_step,
      steps_per_loop=params.trainer.steps_per_loop,
      checkpoint_manager=checkpoint_manager,
      summary_dir=os.path.join(model_dir, 'train') if save_summary else None,
      eval_summary_dir=os.path.join(model_dir, 'validation')
      if save_summary else None,
      summary_interval=params.trainer.summary_interval
      if save_summary else None)

  logging.info('Starts to execute mode: %s', mode)
  with distribution_strategy.scope():
    if mode == 'train':
      controller.train(steps=params.trainer.train_steps)
    elif mode == 'train_and_eval':
      controller.train_and_evaluate(
          train_steps=params.trainer.train_steps,
          eval_steps=params.trainer.validation_steps,
          eval_interval=params.trainer.validation_interval)
    elif mode == 'eval':
      controller.evaluate(steps=params.trainer.validation_steps)
    elif mode == 'continuous_eval':

      def timeout_fn():
        if trainer.global_step.numpy() >= params.trainer.train_steps:
          return True
        return False

      controller.evaluate_continuously(
          steps=params.trainer.validation_steps,
          timeout=params.trainer.continuous_eval_timeout,
          timeout_fn=timeout_fn)
    else:
      raise NotImplementedError('The mode is not implemented: %s' % mode)

  if run_post_eval:
    with distribution_strategy.scope():
      return trainer.model, trainer.evaluate(
          tf.convert_to_tensor(params.trainer.validation_steps))
  else:
    return trainer.model, {}
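
A minimal usage sketch of run_experiment, mirroring what test_end_to_end in train_lib_test.py below does. MyProgTaskConfig is a hypothetical placeholder for a TaskConfig registered against a task that implements both base_task.Task and policies.ProgressivePolicy:

import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.fast_training.progressive import train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib

params = cfg.ExperimentConfig(
    trainer=prog_trainer_lib.ProgressiveTrainerConfig(),
    task=MyProgTaskConfig())  # hypothetical registered task config
model_dir = '/tmp/prog_experiment'  # hypothetical path

strategy = tf.distribute.get_strategy()  # default single-replica strategy
with strategy.scope():
  task = task_factory.get_task(params.task, logging_dir=model_dir)

model, eval_logs = train_lib.run_experiment(
    distribution_strategy=strategy,
    task=task,
    mode='train_and_eval',
    params=params,
    model_dir=model_dir,
    run_post_eval=True)  # eval_logs is then non-empty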
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/train_lib_test.py
0 → 100644
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the progressive train_lib."""
import os

from absl import flags
from absl.testing import parameterized
import dataclasses
import orbit
import tensorflow as tf

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import optimization
from official.modeling.hyperparams import params_dict
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
from official.utils.testing import mock_task

FLAGS = flags.FLAGS

tfm_flags.define_flags()


@dataclasses.dataclass
class ProgTaskConfig(cfg.TaskConfig):
  pass


@task_factory.register_task_cls(ProgTaskConfig)
class ProgMockTask(policies.ProgressivePolicy, mock_task.MockTask):
  """Progressive task for testing."""

  def __init__(self, params: cfg.TaskConfig, logging_dir: str = None):
    mock_task.MockTask.__init__(self, params=params, logging_dir=logging_dir)
    policies.ProgressivePolicy.__init__(self)

  def num_stages(self):
    return 2

  def num_steps(self, stage_id):
    return 2 if stage_id == 0 else 4

  def get_model(self, stage_id, old_model=None):
    del stage_id, old_model
    return self.build_model()

  def get_optimizer(self, stage_id):
    """Build optimizer for each stage."""
    params = optimization.OptimizationConfig({
        'optimizer': {
            'type': 'adamw',
        },
        'learning_rate': {
            'type': 'polynomial',
            'polynomial': {
                'initial_learning_rate': 0.01,
                'end_learning_rate': 0.0,
                'power': 1.0,
                'decay_steps': 10,
            },
        },
        'warmup': {
            'polynomial': {
                'power': 1,
                'warmup_steps': 2,
            },
            'type': 'polynomial',
        }
    })
    opt_factory = optimization.OptimizerFactory(params)
    optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    return optimizer

  def get_train_dataset(self, stage_id):
    del stage_id
    strategy = tf.distribute.get_strategy()
    return orbit.utils.make_distributed_dataset(
        strategy, self.build_inputs, None)

  def get_eval_dataset(self, stage_id):
    del stage_id
    strategy = tf.distribute.get_strategy()
    return orbit.utils.make_distributed_dataset(
        strategy, self.build_inputs, None)


class TrainTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super(TrainTest, self).setUp()
    self._test_config = {
        'trainer': {
            'checkpoint_interval': 10,
            'steps_per_loop': 10,
            'summary_interval': 10,
            'train_steps': 10,
            'validation_steps': 5,
            'validation_interval': 10,
            'continuous_eval_timeout': 1,
            'optimizer_config': {
                'optimizer': {
                    'type': 'sgd',
                },
                'learning_rate': {
                    'type': 'constant'
                }
            }
        },
    }

  @combinations.generate(
      combinations.combine(
          distribution_strategy=[
              strategy_combinations.default_strategy,
              strategy_combinations.cloud_tpu_strategy,
              strategy_combinations.one_device_strategy_gpu,
          ],
          flag_mode=['train', 'eval', 'train_and_eval'],
          run_post_eval=[True, False]))
  def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
    model_dir = self.get_temp_dir()
    experiment_config = cfg.ExperimentConfig(
        trainer=prog_trainer_lib.ProgressiveTrainerConfig(),
        task=ProgTaskConfig())
    experiment_config = params_dict.override_params_dict(
        experiment_config, self._test_config, is_strict=False)

    with distribution_strategy.scope():
      task = task_factory.get_task(experiment_config.task,
                                   logging_dir=model_dir)

    _, logs = train_lib.run_experiment(
        distribution_strategy=distribution_strategy,
        task=task,
        mode=flag_mode,
        params=experiment_config,
        model_dir=model_dir,
        run_post_eval=run_post_eval)

    if run_post_eval:
      self.assertNotEmpty(logs)
    else:
      self.assertEmpty(logs)

    if flag_mode == 'eval':
      return
    self.assertNotEmpty(
        tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
    # Tests continuous evaluation.
    _, logs = train_lib.run_experiment(
        distribution_strategy=distribution_strategy,
        task=task,
        mode='continuous_eval',
        params=experiment_config,
        model_dir=model_dir,
        run_post_eval=run_post_eval)
    print(logs)


if __name__ == '__main__':
  tf.test.main()
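
The schedule in ProgMockTask (2 steps for stage 0, then 4 for stage 1) is what is_stage_advancing checks against. A small sketch of the cumulative mapping, assuming compute_stage_id (defined in policies.py, not shown in this commit excerpt) accumulates per-stage num_steps:

# Assumed behaviour of compute_stage_id for a ProgMockTask-style schedule:
# stage 0 covers global steps [0, 2), stage 1 covers [2, 6).
def compute_stage_id(global_step: int, steps_per_stage=(2, 4)) -> int:
  for stage_id, steps in enumerate(steps_per_stage):
    if global_step < steps:
      return stage_id
    global_step -= steps
  return len(steps_per_stage) - 1  # clamp to the final stage

assert compute_stage_id(1) == 0
assert compute_stage_id(2) == 1   # the boundary where the stage advances
assert compute_stage_id(99) == 1  # training past the schedule stays in stage 1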
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/trainer.py
0 → 100644
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Progressive Trainer implementation.
The trainer implements the Orbit `StandardTrainable` and
`StandardEvaluable` interfaces. Trainers inside this project should be
interchangeable and independent of model architectures and tasks.
"""
import dataclasses
import os
from typing import Any, Optional

# Import libraries
from absl import logging
import gin
import orbit
import tensorflow as tf

from official.core import base_task
from official.core import base_trainer as trainer_lib
from official.core import config_definitions
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import utils

ExperimentConfig = config_definitions.ExperimentConfig


@dataclasses.dataclass
class ProgressiveTrainerConfig(config_definitions.TrainerConfig):
  """Configuration for progressive trainer.

  Attributes:
    progressive: A task-specific config. Users can subclass ProgressiveConfig
      and define any task-specific settings in their subclass.
    export_checkpoint: A bool. Whether to export checkpoints in
      non-progressive manner (without the volatiles wrapper) such that your
      down-stream tasks can load checkpoints from a progressive trainer as if
      it is a regular checkpoint.
    export_checkpoint_interval: An int. The number of steps between exporting
      checkpoints. If None (by default), will use the same value as
      TrainerConfig.checkpoint_interval.
    export_max_to_keep: The maximum number of exported checkpoints to keep.
      If None (by default), will use the same value as
      TrainerConfig.max_to_keep.
    export_only_final_stage_ckpt: A bool. Whether to only export checkpoints
      during the final progressive training stage, i.e., whether to skip
      exporting small, partial models. In many cases, it is not meaningful to
      finetune a small, partial model in down-stream tasks.
  """
  progressive: Optional[policies.ProgressiveConfig] = None
  export_checkpoint: bool = True
  export_checkpoint_interval: Optional[int] = None
  export_max_to_keep: Optional[int] = None
  export_only_final_stage_ckpt: bool = True


@gin.configurable
class ProgressiveTrainer(trainer_lib.Trainer):
  """Implements the progressive trainer shared for TensorFlow models."""

  def __init__(
      self,
      config: ExperimentConfig,
      prog_task: base_task.Task,  # also implements ProgressivePolicy.
      ckpt_dir: str = '',
      train: bool = True,
      evaluate: bool = True,
      checkpoint_exporter: Any = None):
    """Initialize common trainer for TensorFlow models.

    Args:
      config: An `ExperimentConfig` instance specifying experiment config.
      prog_task: An instance that implements both policies.ProgressivePolicy
        and base_task.Task.
      ckpt_dir: Checkpoint directory.
      train: bool, whether or not this trainer will be used for training.
        Defaults to True.
      evaluate: bool, whether or not this trainer will be used for evaluation.
        Defaults to True.
      checkpoint_exporter: an object that has the `maybe_export_checkpoint`
        interface.
    """
    # Gets the current distribution strategy. If not inside any strategy
    # scope, it gets a single-replica no-op strategy.
    self._strategy = tf.distribute.get_strategy()
    self._config = config
    self._runtime_options = trainer_lib.get_runtime_options(config)
    self._task = prog_task

    # Directory for non-progressive checkpoints.
    self._export_ckpt_dir = os.path.join(ckpt_dir, 'exported_ckpts')
    tf.io.gfile.makedirs(self._export_ckpt_dir)
    self._export_ckpt_manager = None

    # Receive other checkpoint export, e.g., best checkpoint exporter.
    # TODO(lehou): unify the checkpoint exporting logic, although the default
    # setting does not use checkpoint_exporter.
    self._checkpoint_exporter = checkpoint_exporter

    self._global_step = orbit.utils.create_global_step()

    self._checkpoint = utils.CheckpointWithHooks(
        before_load_hook=self._update_pt_stage_from_ckpt,
        global_step=self.global_step,
        **self._task.cur_checkpoint_items)

    self._train_loss = tf.keras.metrics.Mean('training_loss',
                                             dtype=tf.float32)
    self._validation_loss = tf.keras.metrics.Mean(
        'validation_loss', dtype=tf.float32)
    self._train_metrics = self.task.build_metrics(
        training=True) + self.model.metrics
    self._validation_metrics = self.task.build_metrics(
        training=False) + self.model.metrics

    if train:
      orbit.StandardTrainer.__init__(
          self,
          None,  # Manage train_dataset by ourselves, not by StandardTrainer.
          options=orbit.StandardTrainerOptions(
              use_tf_while_loop=config.trainer.train_tf_while_loop,
              use_tf_function=config.trainer.train_tf_function))

    if evaluate:
      orbit.StandardEvaluator.__init__(
          self,
          None,  # Manage eval_dataset by ourselves, not by StandardEvaluator.
          options=orbit.StandardEvaluatorOptions(
              use_tf_function=config.trainer.eval_tf_function))

  @property
  def model(self):
    return self._task.cur_model

  @property
  def optimizer(self):
    return self._task.cur_optimizer

  # override
  @property
  def train_dataset(self):
    """Overriding StandardTrainer.train_dataset."""
    return self._task.cur_train_dataset

  # override
  @train_dataset.setter
  def train_dataset(self, _):
    raise SyntaxError('Please do not set train_dataset. Progressive training '
                      'relies on the progressive policy to manage the train '
                      'dataset.')

  # override
  @property
  def eval_dataset(self):
    """Overriding StandardEvaluator.eval_dataset."""
    return self._task.cur_eval_dataset

  # override
  @eval_dataset.setter
  def eval_dataset(self, _):
    raise SyntaxError('Please do not set eval_dataset. Progressive training '
                      'relies on the progressive policy to manage the eval '
                      'dataset.')

  def train_loop_end(self):
    """See base class."""
    logs = {}
    for metric in self.train_metrics + [self.train_loss]:
      logs[metric.name] = metric.result()
      metric.reset_states()
    if callable(self.optimizer.learning_rate):
      logs['learning_rate'] = self.optimizer.learning_rate(
          self.optimizer.iterations)
    else:
      logs['learning_rate'] = self.optimizer.learning_rate

    self._maybe_export_non_progressive_checkpoint(self._export_ckpt_dir)
    if self._task.is_stage_advancing(self.global_step.numpy()):
      old_train_dataset = self.train_dataset

      # Update progressive properties.
      self._task.update_pt_stage(self.global_step.numpy())

      # Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will
      # rebuild the train and eval functions with the updated model.
      self._train_loop_fn = None
      self._eval_loop_fn = None

      if self.train_dataset != old_train_dataset:
        # Setting `self._train_iter` to None will rebuild the dataset
        # iterator.
        self._train_iter = None

      # Setting `self._export_ckpt_manager` to None will rebuild the
      # checkpoint for exporting.
      self._export_ckpt_manager = None

    return logs

  def _update_pt_stage_from_ckpt(self, ckpt_file):
    """Update stage properties based on the global_step in a ckpt file.

    Before loading variables from a checkpoint file, we need to go to the
    correct stage and build the corresponding model and optimizer, to make
    sure that we restore variables of the right model and optimizer.

    Args:
      ckpt_file: Checkpoint file that will be restored/read from.
    """
    if not ckpt_file:
      return
    ckpt = tf.train.Checkpoint(global_step=self.global_step)
    ckpt.read(ckpt_file).expect_partial().assert_existing_objects_matched()

    if self._task.is_stage_advancing(self.global_step.numpy()):
      old_train_dataset = self.train_dataset

      # Update progressive properties.
      self._task.update_pt_stage(self.global_step.numpy(),
                                 pass_old_model=False)

      # Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will
      # rebuild the train and eval functions with the updated model.
      self._train_loop_fn = None
      self._eval_loop_fn = None

      if self.train_dataset != old_train_dataset:
        # Setting `self._train_iter` to None will rebuild the dataset
        # iterator.
        self._train_iter = None

      # Setting `self._export_ckpt_manager` to None will rebuild the
      # checkpoint for exporting.
      self._export_ckpt_manager = None

  def _maybe_export_non_progressive_checkpoint(self, export_ckpt_dir):
    """Export checkpoints in non-progressive format.

    This basically removes the wrapping of self._task.cur_checkpoint_items
    -- just save the model, optimizer, etc., directly. The purpose is to let
    your down-stream tasks use these checkpoints.

    Args:
      export_ckpt_dir: A str. The folder of exported checkpoints.
    """
    if not self.config.trainer.export_checkpoint:
      logging.info('Not exporting checkpoints.')
      return
    if not self._task.is_last_stage and (
        self.config.trainer.export_only_final_stage_ckpt):
      logging.info('Not exporting checkpoints until the last stage.')
      return

    if self._export_ckpt_manager is None:
      # Create a checkpoint object just now, to make sure we use
      # progressive_policy.cur_model and progressive_policy.cur_optimizer of
      # the current stage.
      if hasattr(self.model, 'checkpoint_items'):
        checkpoint_items = self.model.checkpoint_items
      else:
        checkpoint_items = {}
      checkpoint = tf.train.Checkpoint(
          global_step=self.global_step,
          model=self.model,
          optimizer=self.optimizer,
          **checkpoint_items)

      max_to_keep = self.config.trainer.export_max_to_keep or (
          self.config.trainer.max_to_keep)
      checkpoint_interval = self.config.trainer.export_checkpoint_interval or (
          self.config.trainer.checkpoint_interval)
      self._export_ckpt_manager = tf.train.CheckpointManager(
          checkpoint,
          directory=export_ckpt_dir,
          checkpoint_name='ckpt',
          step_counter=self.global_step,
          max_to_keep=max_to_keep,
          checkpoint_interval=checkpoint_interval,
      )

    # Make sure we export the last checkpoint.
    last_checkpoint = (
        self.global_step.numpy() == self._config.trainer.train_steps)
    checkpoint_path = self._export_ckpt_manager.save(
        checkpoint_number=self.global_step.numpy(),
        check_interval=not last_checkpoint)
    if checkpoint_path:
      logging.info('Checkpoints exported: %s.', checkpoint_path)
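
A minimal construction sketch, mirroring create_test_trainer in trainer_test.py below. Here experiment_config and my_prog_task are hypothetical: the former is an ExperimentConfig whose trainer field is a ProgressiveTrainerConfig, the latter an instance implementing both base_task.Task and policies.ProgressivePolicy.

import tensorflow as tf
from official.modeling.fast_training.progressive import trainer as trainer_lib

# `experiment_config` and `my_prog_task` are assumed to exist (see lead-in).
trainer = trainer_lib.ProgressiveTrainer(
    config=experiment_config,
    prog_task=my_prog_task,
    ckpt_dir='/tmp/prog_ckpts')  # exported ckpts land under exported_ckpts/

# Run four training steps; logs include the loss and current learning rate.
logs = trainer.train(tf.convert_to_tensor(4, dtype=tf.int32))
print(logs['training_loss'], logs['learning_rate'])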
TensorFlow2x/ComputeVision/Classification/models-master/official/modeling/fast_training/progressive/trainer_test.py
0 → 100644
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the progressive trainer."""
# pylint: disable=g-direct-tensorflow-import
import os

from absl.testing import parameterized
import orbit
import tensorflow as tf

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import trainer as trainer_lib
from official.nlp.configs import bert
from official.utils.testing import mock_task


def all_strategy_combinations():
  return combinations.combine(
      distribution=[
          strategy_combinations.default_strategy,
          strategy_combinations.cloud_tpu_strategy,
          strategy_combinations.one_device_strategy_gpu,
      ],)


def get_exp_config():
  return cfg.ExperimentConfig(
      task=cfg.TaskConfig(model=bert.PretrainerConfig()),
      trainer=trainer_lib.ProgressiveTrainerConfig(
          export_checkpoint=True,
          export_checkpoint_interval=1,
          export_only_final_stage_ckpt=False))


class TestPolicy(policies.ProgressivePolicy, mock_task.MockTask):
  """Just for testing purposes."""

  def __init__(self, strategy, task_config, change_train_dataset=True):
    self._strategy = strategy
    self._change_train_dataset = change_train_dataset
    self._my_train_dataset = None
    mock_task.MockTask.__init__(self, params=task_config, logging_dir=None)
    policies.ProgressivePolicy.__init__(self)

  def num_stages(self) -> int:
    return 2

  def num_steps(self, stage_id: int) -> int:
    return 2 if stage_id == 0 else 4

  def get_model(self, stage_id: int,
                old_model: tf.keras.Model) -> tf.keras.Model:
    del stage_id, old_model
    return self.build_model()

  def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer:
    optimizer_type = 'sgd' if stage_id == 0 else 'adamw'
    optimizer_config = cfg.OptimizationConfig({
        'optimizer': {'type': optimizer_type},
        'learning_rate': {'type': 'constant'}})
    opt_factory = optimization.OptimizerFactory(optimizer_config)
    return opt_factory.build_optimizer(opt_factory.build_learning_rate())

  def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
    if not self._change_train_dataset and self._my_train_dataset:
      return self._my_train_dataset
    if self._strategy:
      self._my_train_dataset = orbit.utils.make_distributed_dataset(
          self._strategy,
          self._build_inputs,
          stage_id)
    else:
      self._my_train_dataset = self._build_inputs(stage_id)
    return self._my_train_dataset

  def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset:
    if self._strategy:
      return orbit.utils.make_distributed_dataset(
          self._strategy,
          self._build_inputs,
          stage_id)
    return self._build_inputs(stage_id)

  def _build_inputs(self, stage_id):

    def dummy_data(_):
      batch_size = 2 if stage_id == 0 else 1
      x = tf.zeros(shape=(batch_size, 2), dtype=tf.float32)
      label = tf.zeros(shape=(batch_size, 1), dtype=tf.float32)
      return x, label

    dataset = tf.data.Dataset.range(1)
    dataset = dataset.repeat()
    return dataset.map(
        dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)


class TrainerTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super(TrainerTest, self).setUp()
    self._config = get_exp_config()

  def create_test_trainer(self, distribution, model_dir,
                          change_train_dataset):
    trainer = trainer_lib.ProgressiveTrainer(
        self._config,
        prog_task=TestPolicy(
            distribution, self._config.task, change_train_dataset),
        ckpt_dir=model_dir)
    return trainer

  @combinations.generate(all_strategy_combinations())
  def test_checkpointing(self, distribution):
    model_dir = self.get_temp_dir()
    ckpt_file = os.path.join(model_dir, 'ckpt')
    with distribution.scope():
      trainer = self.create_test_trainer(distribution, model_dir, True)
      self.assertFalse(trainer._task.is_last_stage)
      trainer.train(tf.convert_to_tensor(4, dtype=tf.int32))
      self.assertTrue(trainer._task.is_last_stage)
      trainer.checkpoint.save(ckpt_file)

      trainer = self.create_test_trainer(distribution, model_dir, True)
      self.assertFalse(trainer._task.is_last_stage)
      trainer.checkpoint.restore(ckpt_file + '-1')
      self.assertTrue(trainer._task.is_last_stage)

  @combinations.generate(all_strategy_combinations())
  def test_train_dataset(self, distribution):
    model_dir = self.get_temp_dir()
    with distribution.scope():
      trainer = self.create_test_trainer(distribution, model_dir, True)
      # Using dataset of stage == 0
      train_iter = tf.nest.map_structure(iter, trainer.train_dataset)
      train_data = train_iter.next()[0]
      if distribution.num_replicas_in_sync > 1:
        train_data = train_data.values[0]
      self.assertEqual(train_data.shape[0], 2)

      trainer.train(tf.convert_to_tensor(4, dtype=tf.int32))
      # Using dataset of stage == 1
      train_iter = tf.nest.map_structure(iter, trainer.train_dataset)
      train_data = train_iter.next()[0]
      if distribution.num_replicas_in_sync > 1:
        train_data = train_data.values[0]
      self.assertEqual(train_data.shape[0], 1)

      with self.assertRaises(SyntaxError):
        trainer.train_dataset = None

  @combinations.generate(all_strategy_combinations())
  def test_train_dataset_no_switch(self, distribution):
    model_dir = self.get_temp_dir()
    with distribution.scope():
      trainer = self.create_test_trainer(distribution, model_dir, False)
      trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
      # _train_iter is not reset since the dataset is not changed.
      self.assertIsNotNone(trainer._train_iter)
    with distribution.scope():
      trainer = self.create_test_trainer(distribution, model_dir, True)
      trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
      # _train_iter is reset since the dataset changed.
      self.assertIsNone(trainer._train_iter)


class TrainerWithMaskedLMTaskTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super(TrainerWithMaskedLMTaskTest, self).setUp()
    self._config = get_exp_config()

  def create_test_trainer(self, distribution):
    trainer = trainer_lib.ProgressiveTrainer(
        self._config,
        prog_task=TestPolicy(distribution, self._config.task),
        ckpt_dir=self.get_temp_dir())
    return trainer

  @combinations.generate(all_strategy_combinations())
  def test_trainer_train(self, distribution):
    with distribution.scope():
      trainer = self.create_test_trainer(distribution)
      logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('training_loss', logs)
      self.assertIn('learning_rate', logs)

  @combinations.generate(all_strategy_combinations())
  def test_trainer_validate(self, distribution):
    with distribution.scope():
      trainer = self.create_test_trainer(distribution)
      logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('validation_loss', logs)
      self.assertEqual(logs['counter'],
                       5. * distribution.num_replicas_in_sync)

  @combinations.generate(
      combinations.combine(
          mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
          loss_scale=[None, 'dynamic', 128, 256],
      ))
  def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
    config = cfg.ExperimentConfig(
        task=cfg.TaskConfig(model=bert.PretrainerConfig()),
        runtime=cfg.RuntimeConfig(
            mixed_precision_dtype=mixed_precision_dtype,
            loss_scale=loss_scale),
        trainer=trainer_lib.ProgressiveTrainerConfig(
            export_checkpoint=True,
            export_checkpoint_interval=1,
            export_only_final_stage_ckpt=False))
    task = TestPolicy(None, config.task)
    trainer = trainer_lib.ProgressiveTrainer(config, task,
                                             self.get_temp_dir())
    if mixed_precision_dtype != 'float16':
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    elif mixed_precision_dtype == 'float16' and loss_scale is None:
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)

    metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
    self.assertIn('training_loss', metrics)


if __name__ == '__main__':
  tf.test.main()