"torchvision/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "4a3b9472e480eb96903516f696230244175219ee"
Commit 67ad909d authored by ryan0507's avatar ryan0507
Browse files

YT8M config added, unused util function deleted

parent 1fd7aaaf
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yt8m.configs import yt8m
from official.projects.yt8m.configs.yt8m import yt8m as exp_cfg
class YT8MTest(tf.test.TestCase, parameterized.TestCase):
  """Tests that the registered YT8M experiment configs build and validate."""

  @parameterized.parameters(
      ('yt8m_experiment',),)
  def test_yt8m_configs(self, config_name):
    # Renamed from test_assemblenet_configs: this test covers the YT8M
    # experiment registration, not AssembleNet (copy-paste leftover).
    """Builds the named experiment config and checks its structure."""
    config = exp_factory.get_exp_config(config_name)
    self.assertIsInstance(config, cfg.ExperimentConfig)
    self.assertIsInstance(config.task, cfg.TaskConfig)
    self.assertIsInstance(config.task.model, hyperparams.Config)
    self.assertIsInstance(config.task.train_data, cfg.DataConfig)
    # Unsetting is_training must make validation fail.
    config.task.train_data.is_training = None
    with self.assertRaises(KeyError):
      config.validate()


if __name__ == '__main__':
  tf.test.main()
...@@ -20,7 +20,7 @@ import tensorflow as tf ...@@ -20,7 +20,7 @@ import tensorflow as tf
from official.vision.dataloaders import tfexample_utils from official.vision.dataloaders import tfexample_utils
def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2): def dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
"""Dequantize the feature from the byte format to the float format. """Dequantize the feature from the byte format to the float format.
Args: Args:
...@@ -38,7 +38,7 @@ def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2): ...@@ -38,7 +38,7 @@ def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
return feat_vector * scalar + bias return feat_vector * scalar + bias
def MakeSummary(name, value): def make_summary(name, value):
"""Creates a tf.Summary proto with the given name and value.""" """Creates a tf.Summary proto with the given name and value."""
summary = tf.Summary() summary = tf.Summary()
val = summary.value.add() val = summary.value.add()
...@@ -47,7 +47,7 @@ def MakeSummary(name, value): ...@@ -47,7 +47,7 @@ def MakeSummary(name, value):
return summary return summary
def AddGlobalStepSummary(summary_writer, def add_global_step_summary(summary_writer,
global_step_val, global_step_val,
global_step_info_dict, global_step_info_dict,
summary_scope="Eval"): summary_scope="Eval"):
...@@ -69,18 +69,18 @@ def AddGlobalStepSummary(summary_writer, ...@@ -69,18 +69,18 @@ def AddGlobalStepSummary(summary_writer,
examples_per_second = global_step_info_dict.get("examples_per_second", -1) examples_per_second = global_step_info_dict.get("examples_per_second", -1)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("GlobalStep/" + summary_scope + "_Hit@1", this_hit_at_one), make_summary("GlobalStep/" + summary_scope + "_Hit@1", this_hit_at_one),
global_step_val) global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("GlobalStep/" + summary_scope + "_Perr", this_perr), make_summary("GlobalStep/" + summary_scope + "_Perr", this_perr),
global_step_val) global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("GlobalStep/" + summary_scope + "_Loss", this_loss), make_summary("GlobalStep/" + summary_scope + "_Loss", this_loss),
global_step_val) global_step_val)
if examples_per_second != -1: if examples_per_second != -1:
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("GlobalStep/" + summary_scope + "_Example_Second", make_summary("GlobalStep/" + summary_scope + "_Example_Second",
examples_per_second), global_step_val) examples_per_second), global_step_val)
summary_writer.flush() summary_writer.flush()
...@@ -92,7 +92,7 @@ def AddGlobalStepSummary(summary_writer, ...@@ -92,7 +92,7 @@ def AddGlobalStepSummary(summary_writer,
return info return info
def AddEpochSummary(summary_writer, def add_epoch_summary(summary_writer,
global_step_val, global_step_val,
epoch_info_dict, epoch_info_dict,
summary_scope="Eval"): summary_scope="Eval"):
...@@ -117,18 +117,18 @@ def AddEpochSummary(summary_writer, ...@@ -117,18 +117,18 @@ def AddEpochSummary(summary_writer,
mean_ap = np.mean(aps) mean_ap = np.mean(aps)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("Epoch/" + summary_scope + "_Avg_Hit@1", avg_hit_at_one), make_summary("Epoch/" + summary_scope + "_Avg_Hit@1", avg_hit_at_one),
global_step_val) global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("Epoch/" + summary_scope + "_Avg_Perr", avg_perr), make_summary("Epoch/" + summary_scope + "_Avg_Perr", avg_perr),
global_step_val) global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("Epoch/" + summary_scope + "_Avg_Loss", avg_loss), make_summary("Epoch/" + summary_scope + "_Avg_Loss", avg_loss),
global_step_val) global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("Epoch/" + summary_scope + "_MAP", mean_ap), global_step_val) make_summary("Epoch/" + summary_scope + "_MAP", mean_ap), global_step_val)
summary_writer.add_summary( summary_writer.add_summary(
MakeSummary("Epoch/" + summary_scope + "_GAP", gap), global_step_val) make_summary("Epoch/" + summary_scope + "_GAP", gap), global_step_val)
summary_writer.flush() summary_writer.flush()
info = ("epoch/eval number {0} | Avg_Hit@1: {1:.3f} | Avg_PERR: {2:.3f} " info = ("epoch/eval number {0} | Avg_Hit@1: {1:.3f} | Avg_PERR: {2:.3f} "
...@@ -138,7 +138,7 @@ def AddEpochSummary(summary_writer, ...@@ -138,7 +138,7 @@ def AddEpochSummary(summary_writer,
return info return info
def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes): def get_list_of_feature_names_and_sizes(feature_names, feature_sizes):
"""Extract the list of feature names and the dimensionality. """Extract the list of feature names and the dimensionality.
Args: Args:
...@@ -164,59 +164,7 @@ def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes): ...@@ -164,59 +164,7 @@ def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes):
return list_of_feature_names, list_of_feature_sizes return list_of_feature_names, list_of_feature_sizes
def ClipGradientNorms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  result = []
  for gradient, variable in gradients_to_variables:
    # None gradients pass through untouched; only real gradients are clipped.
    if gradient is not None:
      if isinstance(gradient, tf.IndexedSlices):
        # Clip only the values of a sparse gradient, keeping its indices.
        clipped_values = tf.clip_by_norm(gradient.values, max_norm)
        gradient = tf.IndexedSlices(
            clipped_values, gradient.indices, gradient.dense_shape)
      else:
        gradient = tf.clip_by_norm(gradient, max_norm)
    result.append((gradient, variable))
  return result
def CombineGradients(tower_grads):
  """Calculate the combined gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list is
      over individual gradients. The inner list is over the gradient calculation
      for each tower.

  Returns:
    List of pairs of (gradient, variable) where the gradient has been summed
    across all towers.
  """
  # Drop pairs whose gradient is None before combining across towers.
  present = [[pair for pair in tower if pair[0] is not None]
             for tower in tower_grads]
  combined = []
  # Variables are taken from the first tower; gradients are summed over all.
  for idx, (_, variable) in enumerate(present[0]):
    stacked = tf.stack([tower[idx][0] for tower in present], 0)
    combined.append((tf.reduce_sum(stacked, 0), variable))
  return combined
def MakeYt8mExample(num_segment: int = 5) -> tf.train.SequenceExample:
"""Generate fake data for unit tests.""" """Generate fake data for unit tests."""
rgb = np.random.randint(low=256, size=1024, dtype=np.uint8) rgb = np.random.randint(low=256, size=1024, dtype=np.uint8)
audio = np.random.randint(low=256, size=128, dtype=np.uint8) audio = np.random.randint(low=256, size=128, dtype=np.uint8)
...@@ -240,7 +188,7 @@ def MakeYt8mExample(num_segment: int = 5) -> tf.train.SequenceExample: ...@@ -240,7 +188,7 @@ def MakeYt8mExample(num_segment: int = 5) -> tf.train.SequenceExample:
# TODO(yeqing): Move the test related functions to test_utils. # TODO(yeqing): Move the test related functions to test_utils.
def MakeExampleWithFloatFeatures( def make_example_with_float_features(
num_segment: int = 5) -> tf.train.SequenceExample: num_segment: int = 5) -> tf.train.SequenceExample:
"""Generate fake data for unit tests.""" """Generate fake data for unit tests."""
rgb = np.random.rand(1, 2048).astype(np.float32) rgb = np.random.rand(1, 2048).astype(np.float32)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment