Unverified Commit 09d9656f authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'panoptic-segmentation' into panoptic-deeplab-modeling

parents ac671306 49a5706c
@@ -21,8 +21,8 @@ from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from official.vision.image_classification import optimizer_factory
from official.vision.image_classification.configs import base_configs
from official.legacy.image_classification import optimizer_factory
from official.legacy.image_classification.configs import base_configs
class OptimizerFactoryTest(tf.test.TestCase, parameterized.TestCase):
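The hunks above and below apply the same mechanical change throughout this commit: modules under official/vision/image_classification (and, further down, official/nlp/transformer) now live under official/legacy. A minimal compatibility-import sketch for downstream code; only the two module paths come from this diff, and the try/except fallback is a generic migration pattern, not something the commit adds:

```python
# Hypothetical shim for callers that still use the pre-move import path.
# The fallback is an assumed convenience, not part of this commit.
try:
  from official.legacy.image_classification import optimizer_factory
except ImportError:
  from official.vision.image_classification import optimizer_factory
```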
@@ -17,11 +17,9 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from typing import List, Optional, Text, Tuple
from official.vision.image_classification import augment
import tensorflow as tf
from official.legacy.image_classification import augment
# Calculated from the ImageNet training set
@@ -272,8 +270,11 @@ def resize_image(image_bytes: tf.Tensor,
A tensor containing the resized image.
"""
print(height, width)
return tf.compat.v1.image.resize(
image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
image_bytes,
tf.convert_to_tensor([height, width]),
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
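In the resize_image hunk above, the substantive change is that the target size is now passed as a tensor rather than an inline Python list. A reduced, self-contained sketch of the updated call; the default height/width values are assumptions for illustration:

```python
import tensorflow as tf

def resize_image(image_bytes: tf.Tensor,
                 height: int = 224,
                 width: int = 224) -> tf.Tensor:
  """Bilinear resize mirroring the updated call above (sketch only)."""
  return tf.compat.v1.image.resize(
      image_bytes,
      tf.convert_to_tensor([height, width]),  # size as a tensor, per the diff
      method=tf.image.ResizeMethod.BILINEAR,
      align_corners=False)
```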
@@ -245,7 +245,7 @@ def define_keras_flags(model=False,
flags.DEFINE_boolean(
name='enable_tensorboard',
default=False,
help='Whether to enable Tensorboard callback.')
help='Whether to enable TensorBoard callback.')
flags.DEFINE_string(
name='profile_steps',
default=None,
@@ -19,9 +19,8 @@ from __future__ import division
from __future__ import print_function
import dataclasses
from official.legacy.image_classification.configs import base_configs
from official.modeling.hyperparams import base_config
from official.vision.image_classification.configs import base_configs
@dataclasses.dataclass
@@ -30,12 +29,14 @@ class ResNetModelConfig(base_configs.ModelConfig):
name: str = 'ResNet'
num_classes: int = 1000
model_params: base_config.Config = dataclasses.field(
# pylint: disable=g-long-lambda
default_factory=lambda: {
'num_classes': 1000,
'batch_size': None,
'use_l2_regularizer': True,
'rescale_inputs': False,
})
# pylint: enable=g-long-lambda
loss: base_configs.LossConfig = base_configs.LossConfig(
name='sparse_categorical_crossentropy')
optimizer: base_configs.OptimizerConfig = base_configs.OptimizerConfig(
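The ResNetModelConfig hunk above moves the mutable model_params default into dataclasses.field(default_factory=...), the standard way to give a dataclass field a dict default. A standalone toy version, assuming simplified field values rather than the repo's full config:

```python
import dataclasses
from typing import Any, Dict

@dataclasses.dataclass
class ExampleModelConfig:
  name: str = 'ResNet'
  num_classes: int = 1000
  # A bare dict default would raise "mutable default ... is not allowed";
  # default_factory builds a fresh dict for every instance instead.
  model_params: Dict[str, Any] = dataclasses.field(
      default_factory=lambda: {
          'num_classes': 1000,
          'batch_size': None,
      })

config = ExampleModelConfig()
assert config.model_params['num_classes'] == 1000
```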
@@ -24,13 +24,13 @@ from absl import logging
import orbit
import tensorflow as tf
from official.common import distribute_utils
from official.legacy.image_classification.resnet import common
from official.legacy.image_classification.resnet import imagenet_preprocessing
from official.legacy.image_classification.resnet import resnet_runnable
from official.modeling import performance
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
from official.vision.image_classification.resnet import common
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.vision.image_classification.resnet import resnet_runnable
flags.DEFINE_boolean(name='use_tf_function', default=True,
help='Wrap the train and test step inside a '
@@ -28,7 +28,7 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.legacy.image_classification.resnet import imagenet_preprocessing
layers = tf.keras.layers
@@ -16,12 +16,12 @@
import orbit
import tensorflow as tf
from official.legacy.image_classification.resnet import common
from official.legacy.image_classification.resnet import imagenet_preprocessing
from official.legacy.image_classification.resnet import resnet_model
from official.modeling import grad_utils
from official.modeling import performance
from official.utils.flags import core as flags_core
from official.vision.image_classification.resnet import common
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.vision.image_classification.resnet import resnet_model
class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator):
@@ -71,15 +71,9 @@ class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator):
self.optimizer = common.get_optimizer(lr_schedule)
# Make sure iterations variable is created inside scope.
self.global_step = self.optimizer.iterations
use_graph_rewrite = flags_obj.fp16_implementation == 'graph_rewrite'
if use_graph_rewrite and not flags_obj.use_tf_function:
raise ValueError('--fp16_implementation=graph_rewrite requires '
'--use_tf_function to be true')
self.optimizer = performance.configure_optimizer(
self.optimizer,
use_float16=self.dtype == tf.float16,
use_graph_rewrite=use_graph_rewrite,
loss_scale=flags_core.get_loss_scale(flags_obj, default_for_fp16=128))
self.train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
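The hunk above drops the fp16 graph-rewrite path, leaving performance.configure_optimizer to handle loss scaling on its own. As a rough, generic sketch of float16 loss scaling with the Keras API; this is a common pattern, not the repo's actual configure_optimizer implementation:

```python
import tensorflow as tf

def maybe_wrap_for_float16(optimizer, dtype, loss_scale=128):
  # Generic mixed-precision loss-scaling pattern (assumption, not the
  # repo's code): wrap the optimizer when training in float16.
  if dtype == tf.float16:
    return tf.keras.mixed_precision.LossScaleOptimizer(
        optimizer, dynamic=False, initial_scale=loss_scale)
  return optimizer
```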
@@ -105,7 +99,8 @@ class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator):
datasets_num_private_threads=self.flags_obj
.datasets_num_private_threads,
dtype=self.dtype,
drop_remainder=True)
drop_remainder=True,
training_dataset_cache=self.flags_obj.training_dataset_cache)
orbit.StandardTrainer.__init__(
self,
train_dataset,
@@ -26,8 +26,8 @@ from absl import flags
import tensorflow as tf
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.vision.image_classification.resnet import resnet_model
from official.legacy.image_classification.resnet import imagenet_preprocessing
from official.legacy.image_classification.resnet import resnet_model
FLAGS = flags.FLAGS
@@ -32,7 +32,7 @@ model.
# https://github.com/tensorflow/models/tree/master/official#requirements
export PYTHONPATH="$PYTHONPATH:/path/to/models"
cd /path/to/models/official/nlp/transformer
cd /path/to/models/official/legacy/transformer
# Export variables
PARAM_SET=big
@@ -16,7 +16,6 @@
import math
import tensorflow as tf
from official.nlp.modeling import layers
class Attention(tf.keras.layers.Layer):
@@ -51,28 +50,31 @@ class Attention(tf.keras.layers.Layer):
attention_initializer = _glorot_initializer(input_shape.as_list()[-1],
self.hidden_size)
self.query_dense_layer = layers.DenseEinsum(
output_shape=(self.num_heads, size_per_head),
self.query_dense_layer = tf.keras.layers.experimental.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=attention_initializer,
use_bias=False,
bias_axes=None,
name="query")
self.key_dense_layer = layers.DenseEinsum(
output_shape=(self.num_heads, size_per_head),
self.key_dense_layer = tf.keras.layers.experimental.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=attention_initializer,
use_bias=False,
bias_axes=None,
name="key")
self.value_dense_layer = layers.DenseEinsum(
output_shape=(self.num_heads, size_per_head),
self.value_dense_layer = tf.keras.layers.experimental.EinsumDense(
"BTE,ENH->BTNH",
output_shape=(None, self.num_heads, size_per_head),
kernel_initializer=attention_initializer,
use_bias=False,
bias_axes=None,
name="value")
output_initializer = _glorot_initializer(self.hidden_size, self.hidden_size)
self.output_dense_layer = layers.DenseEinsum(
output_shape=self.hidden_size,
num_summed_dimensions=2,
self.output_dense_layer = tf.keras.layers.experimental.EinsumDense(
"BTNH,NHE->BTE",
output_shape=(None, self.hidden_size),
kernel_initializer=output_initializer,
use_bias=False,
bias_axes=None,
name="output_transform")
super(Attention, self).build(input_shape)
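The Attention hunk above swaps the model-garden DenseEinsum layer for tf.keras.layers.experimental.EinsumDense. A minimal sketch of the query projection with assumed sizes, showing how the einsum equation and output_shape (where None stands in for the sequence length) replace the old output_shape=(num_heads, size_per_head) argument:

```python
import tensorflow as tf

hidden_size, num_heads = 512, 8          # assumed values for illustration
size_per_head = hidden_size // num_heads

# Maps [batch, length, hidden] -> [batch, length, num_heads, size_per_head].
query_dense_layer = tf.keras.layers.experimental.EinsumDense(
    "BTE,ENH->BTNH",
    output_shape=(None, num_heads, size_per_head),  # None = sequence length
    bias_axes=None,                                  # no bias term, as in the hunk
    name="query")

x = tf.random.uniform([2, 10, hidden_size])
print(query_dense_layer(x).shape)  # (2, 10, 8, 64)
```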
@@ -24,12 +24,13 @@ import unicodedata
from absl import app
from absl import flags
from absl import logging
import six
from six.moves import range
import tensorflow as tf
from official.nlp.transformer.utils import metrics
from official.nlp.transformer.utils import tokenizer
from official.legacy.transformer.utils import metrics
from official.legacy.transformer.utils import tokenizer
from official.utils.flags import core as flags_core
@@ -109,11 +110,11 @@ def bleu_on_list(ref_lines, hyp_lines, case_sensitive=False):
def main(unused_argv):
if FLAGS.bleu_variant in ("both", "uncased"):
score = bleu_wrapper(FLAGS.reference, FLAGS.translation, False)
tf.logging.info("Case-insensitive results: %f" % score)
logging.info("Case-insensitive results: %f", score)
if FLAGS.bleu_variant in ("both", "cased"):
score = bleu_wrapper(FLAGS.reference, FLAGS.translation, True)
tf.logging.info("Case-sensitive results: %f" % score)
logging.info("Case-sensitive results: %f", score)
def define_compute_bleu_flags():
@@ -142,7 +143,6 @@ def define_compute_bleu_flags():
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
define_compute_bleu_flags()
FLAGS = flags.FLAGS
app.run(main)
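The compute_bleu hunks above replace the removed tf.logging API with absl's logging module and switch from eager %-formatting to lazy argument passing. A tiny self-contained sketch of the pattern; the score value is a placeholder:

```python
from absl import logging

logging.set_verbosity(logging.INFO)

score = 27.4  # placeholder value for illustration
# Pass the format string and arguments separately; absl formats lazily.
logging.info("Case-insensitive results: %f", score)
```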
@@ -18,7 +18,7 @@ import tempfile
import tensorflow as tf
from official.nlp.transformer import compute_bleu
from official.legacy.transformer import compute_bleu
class ComputeBleuTest(tf.test.TestCase):
@@ -29,7 +29,7 @@ from six.moves import urllib
from six.moves import zip
import tensorflow.compat.v1 as tf
from official.nlp.transformer.utils import tokenizer
from official.legacy.transformer.utils import tokenizer
from official.utils.flags import core as flags_core
# pylint: enable=g-bad-import-order