"...git@developer.sourcefind.cn:OpenDAS/megatron-lm.git" did not exist on "160bf237afefbd4a123bccee1ff7a5e98d04974f"
Commit 36d73bf6 authored by Abdullah Rashwan's avatar Abdullah Rashwan Committed by A. Unique TensorFlower
Browse files

Internal change

PiperOrigin-RevId: 331232905
parent 4ea11215
...@@ -159,12 +159,12 @@ def multilevel_crop_and_resize(features, ...@@ -159,12 +159,12 @@ def multilevel_crop_and_resize(features,
with tf.name_scope('multilevel_crop_and_resize'): with tf.name_scope('multilevel_crop_and_resize'):
levels = list(features.keys()) levels = list(features.keys())
min_level = min(levels) min_level = int(min(levels))
max_level = max(levels) max_level = int(max(levels))
batch_size, max_feature_height, max_feature_width, num_filters = ( batch_size, max_feature_height, max_feature_width, num_filters = (
features[min_level].get_shape().as_list()) features[str(min_level)].get_shape().as_list())
if batch_size is None: if batch_size is None:
batch_size = tf.shape(features[min_level])[0] batch_size = tf.shape(features[str(min_level)])[0]
_, num_boxes, _ = boxes.get_shape().as_list() _, num_boxes, _ = boxes.get_shape().as_list()
# Stack feature pyramid into a features_all of shape # Stack feature pyramid into a features_all of shape
...@@ -173,13 +173,13 @@ def multilevel_crop_and_resize(features, ...@@ -173,13 +173,13 @@ def multilevel_crop_and_resize(features,
feature_heights = [] feature_heights = []
feature_widths = [] feature_widths = []
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
shape = features[level].get_shape().as_list() shape = features[str(level)].get_shape().as_list()
feature_heights.append(shape[1]) feature_heights.append(shape[1])
feature_widths.append(shape[2]) feature_widths.append(shape[2])
# Concat tensor of [batch_size, height_l * width_l, num_filters] for each # Concat tensor of [batch_size, height_l * width_l, num_filters] for each
# levels. # levels.
features_all.append( features_all.append(
tf.reshape(features[level], [batch_size, -1, num_filters])) tf.reshape(features[str(level)], [batch_size, -1, num_filters]))
features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters]) features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters])
# Calculate height_l * width_l for each level. # Calculate height_l * width_l for each level.
......
...@@ -69,10 +69,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase): ...@@ -69,10 +69,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase):
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level) feat_size = int(input_size / 2**level)
features[level] = tf.range( features[str(level)] = tf.range(
batch_size * feat_size * feat_size * num_filters, dtype=tf.float32) batch_size * feat_size * feat_size * num_filters, dtype=tf.float32)
features[level] = tf.reshape( features[str(level)] = tf.reshape(
features[level], [batch_size, feat_size, feat_size, num_filters]) features[str(level)], [batch_size, feat_size, feat_size, num_filters])
boxes = tf.constant([ boxes = tf.constant([
[[0, 0, 2, 2]], [[0, 0, 2, 2]],
], dtype=tf.float32) ], dtype=tf.float32)
...@@ -135,10 +135,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase): ...@@ -135,10 +135,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase):
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level) feat_size = int(input_size / 2**level)
features[level] = tf.range( features[str(level)] = tf.range(
batch_size * feat_size * feat_size * num_filters, dtype=tf.float32) batch_size * feat_size * feat_size * num_filters, dtype=tf.float32)
features[level] = tf.reshape( features[str(level)] = tf.reshape(
features[level], [batch_size, feat_size, feat_size, num_filters]) features[str(level)], [batch_size, feat_size, feat_size, num_filters])
boxes = tf.constant([ boxes = tf.constant([
[[0, 0, 2, 3]], [[0, 0, 2, 3]],
], dtype=tf.float32) ], dtype=tf.float32)
...@@ -164,10 +164,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase): ...@@ -164,10 +164,10 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase):
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level) feat_size = int(input_size / 2**level)
features[level] = tf.range( features[str(level)] = tf.range(
batch_size * feat_size * feat_size * num_filters, dtype=tf.float32) batch_size * feat_size * feat_size * num_filters, dtype=tf.float32)
features[level] = tf.reshape( features[str(level)] = tf.reshape(
features[level], [batch_size, feat_size, feat_size, num_filters]) features[str(level)], [batch_size, feat_size, feat_size, num_filters])
boxes = tf.constant([ boxes = tf.constant([
[[0, 0, 2, 2], [0, 0, 2, 3]], [[0, 0, 2, 2], [0, 0, 2, 3]],
], dtype=tf.float32) ], dtype=tf.float32)
...@@ -191,7 +191,7 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase): ...@@ -191,7 +191,7 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase):
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level) feat_size = int(input_size / 2**level)
features[level] = float(level) * tf.ones( features[str(level)] = float(level) * tf.ones(
[batch_size, feat_size, feat_size, num_filters], dtype=tf.float32) [batch_size, feat_size, feat_size, num_filters], dtype=tf.float32)
boxes = tf.constant( boxes = tf.constant(
[ [
...@@ -227,7 +227,7 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase): ...@@ -227,7 +227,7 @@ class MultiLevelCropAndResizeTest(tf.test.TestCase):
features = {} features = {}
for level in range(min_level, max_level + 1): for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level) feat_size = int(input_size / 2**level)
features[level] = tf.constant( features[str(level)] = tf.constant(
np.reshape( np.reshape(
np.arange( np.arange(
batch_size * feat_size * feat_size * num_filters, batch_size * feat_size * feat_size * num_filters,
......
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for model export."""
import abc
import tensorflow as tf
def _decode_image(encoded_image_bytes):
  """Decodes an encoded image string into a uint8 RGB image tensor."""
  image = tf.image.decode_image(encoded_image_bytes, channels=3)
  # Height and width are unknown until runtime; channel count is fixed to RGB.
  image.set_shape((None, None, 3))
  return image
def _decode_tf_example(tf_example_string_tensor):
  """Parses a serialized tf.Example and decodes its encoded image feature."""
  feature_spec = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
  parsed = tf.io.parse_single_example(
      serialized=tf_example_string_tensor, features=feature_spec)
  return _decode_image(parsed['image/encoded'])
class ExportModule(tf.Module, metaclass=abc.ABCMeta):
  """Base Export Module."""

  def __init__(self, params, batch_size, input_image_size, model=None):
    """Initializes a module for export.

    Args:
      params: Experiment params.
      batch_size: Int or None.
      input_image_size: List or Tuple of height, width of the input image.
      model: A tf.keras.Model instance to be exported.
    """
    super(ExportModule, self).__init__()
    self._params = params
    self._batch_size = batch_size
    self._input_image_size = input_image_size
    self._model = model

  @abc.abstractmethod
  def build_model(self):
    """Builds model and sets self._model."""

  @abc.abstractmethod
  def _run_inference_on_image_tensors(self, images):
    """Runs inference on a batch of uint8 image tensors."""

  @tf.function
  def inference_from_image_tensors(self, input_tensor):
    """Runs inference on a batch of already-decoded image tensors."""
    return dict(outputs=self._run_inference_on_image_tensors(input_tensor))

  @tf.function
  def inference_from_image_bytes(self, input_tensor):
    """Decodes a batch of encoded image strings, then runs inference."""
    with tf.device('cpu:0'):
      images = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              _decode_image,
              elems=input_tensor,
              fn_output_signature=tf.TensorSpec(
                  shape=self._input_image_size + [3], dtype=tf.uint8),
              parallel_iterations=32))
      images = tf.stack(images)
    return dict(outputs=self._run_inference_on_image_tensors(images))

  @tf.function
  def inference_from_tf_example(self, input_tensor):
    """Parses serialized tf.Examples, decodes images, then runs inference."""
    with tf.device('cpu:0'):
      images = tf.nest.map_structure(
          tf.identity,
          # Fix: the deprecated `dtype` argument was passed alongside
          # `fn_output_signature`; tf.map_fn silently ignores `dtype` when
          # `fn_output_signature` is set, and `inference_from_image_bytes`
          # does not pass it, so it is dropped here for consistency.
          tf.map_fn(
              _decode_tf_example,
              elems=input_tensor,
              fn_output_signature=tf.TensorSpec(
                  shape=self._input_image_size + [3], dtype=tf.uint8),
              parallel_iterations=32))
      images = tf.stack(images)
    return dict(outputs=self._run_inference_on_image_tensors(images))
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE=XX
CHECKPOINT_PATH=XX
EXPORT_DIR_PATH=XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
import os
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.core import train_utils
from official.modeling import hyperparams
from official.vision.beta import configs
from official.vision.beta.serving import image_classification
FLAGS = flags.FLAGS

# Command-line flags controlling the export: experiment selection, I/O paths,
# config overrides, and the shape/type of the serving input.
flags.DEFINE_string(
    'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
    'config_file',
    default=None,
    help='YAML/JSON files which specifies overrides. The override order '
    'follows the order of args. Note that each file '
    'can be used as an override template to override the default parameters '
    'specified in Python. If the same parameter is specified in both '
    '`--config_file` and `--params_override`, `config_file` will be used '
    'first, followed by params_override.')
flags.DEFINE_string(
    'params_override', '',
    # Typo fix in the user-visible help text: "overriden" -> "overridden".
    'The JSON/YAML file or string which specifies the parameter to be '
    'overridden on top of `config_file` template.')
flags.DEFINE_integer(
    'batch_size', None, 'The batch size.')
flags.DEFINE_string(
    'input_type', 'image_tensor',
    'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
    'input_image_size', '224,224',
    'The comma-separated string of two integers representing the height,width '
    'of the input to the model.')
def export_inference_graph(input_type, batch_size, input_image_size, params,
                           checkpoint_path, export_dir):
  """Exports inference graph for the model specified in the exp config.

  Saved model is stored at export_dir/saved_model, checkpoint is saved
  at export_dir/checkpoint, and params is saved at export_dir/params.yaml.

  Args:
    input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
    batch_size: 'int', or None.
    input_image_size: List or Tuple of height and width.
    params: Experiment params.
    checkpoint_path: Trained checkpoint path or directory.
    export_dir: Export directory path.

  Raises:
    ValueError: If the task has no export module or `input_type` is unknown.
  """
  output_checkpoint_directory = os.path.join(export_dir, 'checkpoint')
  output_saved_model_directory = os.path.join(export_dir, 'saved_model')

  if isinstance(params.task,
                configs.image_classification.ImageClassificationTask):
    export_module = image_classification.ClassificationModule(
        params=params,
        batch_size=batch_size,
        input_image_size=input_image_size)
  else:
    raise ValueError('Export module not implemented for {} task.'.format(
        type(params.task)))

  model = export_module.build_model()

  # Restore weights; `expect_partial` tolerates optimizer slots present in
  # training checkpoints but absent from the inference graph.
  ckpt = tf.train.Checkpoint(model=model)
  ckpt_dir_or_file = checkpoint_path
  if tf.io.gfile.isdir(ckpt_dir_or_file):
    ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
  status = ckpt.restore(ckpt_dir_or_file).expect_partial()

  # Select the serving function and its input signature for the input type.
  if input_type == 'image_tensor':
    input_signature = tf.TensorSpec(
        shape=[batch_size, input_image_size[0], input_image_size[1], 3],
        dtype=tf.uint8)
    # Bug fix: ExportModule has no `inference_from_image` method; the
    # tf.function is named `inference_from_image_tensors`.
    inference_fn = export_module.inference_from_image_tensors
  elif input_type == 'image_bytes':
    input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
    inference_fn = export_module.inference_from_image_bytes
  elif input_type == 'tf_example':
    input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
    inference_fn = export_module.inference_from_tf_example
  else:
    raise ValueError('Unrecognized `input_type`')
  signatures = {
      'serving_default': inference_fn.get_concrete_function(input_signature)
  }

  status.assert_existing_objects_matched()
  ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
  tf.saved_model.save(export_module,
                      output_saved_model_directory,
                      signatures=signatures)
  train_utils.serialize_config(params, export_dir)
def main(_):
  """Builds the experiment config from flags and exports the model."""
  params = exp_factory.get_exp_config(FLAGS.experiment)
  # Apply config-file overrides first, then the inline params override, so
  # `--params_override` takes precedence over `--config_file`.
  for config_file in FLAGS.config_file or []:
    params = hyperparams.override_params_dict(
        params, config_file, is_strict=True)
  if FLAGS.params_override:
    params = hyperparams.override_params_dict(
        params, FLAGS.params_override, is_strict=True)

  params.validate()
  params.lock()

  height_width = [int(dim) for dim in FLAGS.input_image_size.split(',')]
  export_inference_graph(
      input_type=FLAGS.input_type,
      batch_size=FLAGS.batch_size,
      input_image_size=height_width,
      params=params,
      checkpoint_path=FLAGS.checkpoint_path,
      export_dir=FLAGS.export_dir)


if __name__ == '__main__':
  app.run(main)
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection input and model functions for serving/inference."""
import tensorflow as tf
from official.vision.beta.modeling import factory
from official.vision.beta.ops import preprocess_ops
from official.vision.beta.serving import export_base
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
class ClassificationModule(export_base.ExportModule):
  """classification Module."""

  def build_model(self):
    """Builds the classification model and stores it on the module."""
    model_input_specs = tf.keras.layers.InputSpec(
        shape=[self._batch_size] + self._input_image_size + [3])
    self._model = factory.build_classification_model(
        input_specs=model_input_specs,
        model_config=self._params.task.model,
        l2_regularizer=None)
    return self._model

  def _build_inputs(self, image):
    """Builds classification model inputs for serving."""
    # Center-crop, then resize to the serving resolution.
    cropped = preprocess_ops.center_crop_image(image)
    resized = tf.image.resize(
        cropped, self._input_image_size, method=tf.image.ResizeMethod.BILINEAR)
    # Pin the static shape so downstream layers see a fully-defined input.
    resized = tf.reshape(
        resized, [self._input_image_size[0], self._input_image_size[1], 3])
    # Normalize with the ImageNet mean/stddev pixel statistics.
    return preprocess_ops.normalize_image(
        resized, offset=MEAN_RGB, scale=STDDEV_RGB)

  def _run_inference_on_image_tensors(self, images):
    """Cast image to float and run inference.

    Args:
      images: uint8 Tensor of shape [batch_size, None, None, 3]

    Returns:
      Tensor holding classification output logits.
    """
    with tf.device('cpu:0'):
      images = tf.cast(images, dtype=tf.float32)
      preprocessed_spec = tf.TensorSpec(
          shape=self._input_image_size + [3], dtype=tf.float32)
      images = tf.nest.map_structure(
          tf.identity,
          tf.map_fn(
              self._build_inputs,
              elems=images,
              fn_output_signature=preprocessed_spec,
              parallel_iterations=32))
    return self._model(images, training=False)
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for image classification export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.vision.beta.serving import image_classification
class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
  """Exports a small classifier and checks the serving signatures."""

  def _get_classification_module(self):
    """Builds a ResNet-18 ImageNet classification export module."""
    params = exp_factory.get_exp_config('resnet_imagenet')
    params.task.model.backbone.resnet.model_id = 18
    return image_classification.ClassificationModule(
        params, batch_size=1, input_image_size=[224, 224])

  def _export_from_module(self, module, input_type, save_directory):
    """Saves `module` as a SavedModel with a signature for `input_type`."""
    if input_type == 'image_tensor':
      signature_spec = tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.uint8)
      serving_fn = module.inference_from_image_tensors
    elif input_type == 'image_bytes':
      signature_spec = tf.TensorSpec(shape=[None], dtype=tf.string)
      serving_fn = module.inference_from_image_bytes
    elif input_type == 'tf_example':
      signature_spec = tf.TensorSpec(shape=[None], dtype=tf.string)
      serving_fn = module.inference_from_tf_example
    else:
      raise ValueError('Unrecognized `input_type`')
    signatures = {
        'serving_default': serving_fn.get_concrete_function(signature_spec)
    }
    tf.saved_model.save(module, save_directory, signatures=signatures)

  def _get_dummy_input(self, input_type):
    """Get dummy input for the given input type."""
    if input_type == 'image_tensor':
      return tf.zeros((1, 224, 224, 3), dtype=np.uint8)
    elif input_type == 'image_bytes':
      png_buffer = io.BytesIO()
      Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8)).save(
          png_buffer, 'PNG')
      return [png_buffer.getvalue()]
    elif input_type == 'tf_example':
      image_tensor = tf.zeros((224, 224, 3), dtype=tf.uint8)
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
      serialized_example = tf.train.Example(
          features=tf.train.Features(
              feature={
                  'image/encoded':
                      tf.train.Feature(
                          bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
              })).SerializeToString()
      return [serialized_example]

  @parameterized.parameters(
      {'input_type': 'image_tensor'},
      {'input_type': 'image_bytes'},
      {'input_type': 'tf_example'},
  )
  def test_export(self, input_type='image_tensor'):
    export_dir = self.get_temp_dir()
    module = self._get_classification_module()
    model = module.build_model()

    self._export_from_module(module, input_type, export_dir)

    # The SavedModel layout must contain the graph def and the variables.
    self.assertTrue(os.path.exists(os.path.join(export_dir, 'saved_model.pb')))
    self.assertTrue(os.path.exists(
        os.path.join(export_dir, 'variables', 'variables.index')))
    self.assertTrue(os.path.exists(
        os.path.join(export_dir, 'variables', 'variables.data-00000-of-00001')))

    imported = tf.saved_model.load(export_dir)
    classification_fn = imported.signatures['serving_default']
    images = self._get_dummy_input(input_type)

    # Every dummy input decodes to all-zero pixels, so the reloaded signature
    # must match running the in-memory model on preprocessed zero images.
    processed_images = tf.nest.map_structure(
        tf.stop_gradient,
        tf.map_fn(
            module._build_inputs,
            elems=tf.zeros((1, 224, 224, 3), dtype=tf.uint8),
            fn_output_signature=tf.TensorSpec(
                shape=[224, 224, 3], dtype=tf.float32)))
    expected_output = model(processed_images, training=False)
    out = classification_fn(tf.constant(images))
    self.assertAllClose(out['outputs'].numpy(), expected_output.numpy())


if __name__ == '__main__':
  tf.test.main()
...@@ -133,16 +133,16 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase): ...@@ -133,16 +133,16 @@ class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters( @parameterized.parameters(
# Multi scale anchor. # Multi scale anchor.
(5, 6, [1.0], { (5, 6, [1.0], {
5: [[[-16., -16., 48., 48.], [-16., 16., 48., 80.]], '5': [[[-16., -16., 48., 48.], [-16., 16., 48., 80.]],
[[16., -16., 80., 48.], [16., 16., 80., 80.]]], [[16., -16., 80., 48.], [16., 16., 80., 80.]]],
6: [[[-32, -32, 96, 96]]] '6': [[[-32, -32, 96, 96]]]
}),) }),)
def testAnchorGenerationDict(self, min_level, max_level, aspect_ratios, def testAnchorGenerationDict(self, min_level, max_level, aspect_ratios,
expected_boxes): expected_boxes):
image_size = [64, 64] image_size = [64, 64]
levels = range(min_level, max_level + 1) levels = range(min_level, max_level + 1)
anchor_sizes = dict((level, 2**(level + 1)) for level in levels) anchor_sizes = dict((str(level), 2**(level + 1)) for level in levels)
strides = dict((level, 2**level) for level in levels) strides = dict((str(level), 2**level) for level in levels)
anchor_gen = anchor_generator.AnchorGenerator( anchor_gen = anchor_generator.AnchorGenerator(
anchor_sizes=anchor_sizes, anchor_sizes=anchor_sizes,
scales=[1.], scales=[1.],
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment