Unverified commit 32671be9, authored by Xavier Gibert, committed by GitHub

attention_ocr: added export for SavedModel format. (#8757)

* Added export for SavedModel format.

* Fixed some pylint errors.
parent b548c7fd
@@ -166,6 +166,14 @@ implement one in Python or C++.
The recommended way is to use the [Serving infrastructure][serving].
To export to SavedModel format:
```
python model_export.py \
--checkpoint=model.ckpt-399731 \
--export_dir=/tmp/attention_ocr_export
```
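
The exported SavedModel can then be loaded back for inference with the TF 1.x loader. Below is a minimal sketch, assuming the default `--export_for_serving=true` (the input is a batch of serialized tf.Example protos with an `image/encoded` float feature; the tensor names match the export signature used by the unit tests):

```
import numpy as np
import tensorflow as tf

def to_example(image):
  # Serialize one image the way the exported graph expects it.
  example = tf.train.Example()
  example.features.feature['image/encoded'].float_list.value.extend(
      image.ravel().tolist())
  return example.SerializeToString()

# A random stand-in image; (150, 600, 3) is the FSNS dataset image shape.
image = np.random.uniform(0, 255, size=(150, 600, 3)).astype('float32')
with tf.Session(graph=tf.Graph()) as sess:
  tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                             '/tmp/attention_ocr_export')
  print(sess.run('AttentionOcr_v1/predicted_text:0',
                 feed_dict={'tf_example:0': [to_example(image)]}))
```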
Alternatively you can:
1. define a placeholder for images (or directly use a numpy array)
2. [create a graph](https://github.com/tensorflow/models/blob/master/research/attention_ocr/python/eval.py#L60)
@@ -188,7 +196,7 @@ other than a one time experiment please use the [TensorFlow Serving][serving].
[1]: https://github.com/tensorflow/tensorflow/blob/aaf7adc/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
[2]: https://www.tensorflow.org/api_docs/python/tf/contrib/framework/assign_from_checkpoint_fn
[serving]: https://www.tensorflow.org/tfx/serving/serving_basic
## Disclaimer
......
@@ -14,10 +14,10 @@
# ==============================================================================
"""Defines flags common to both train.py and eval.py scripts."""
import logging
import sys
from tensorflow.python.platform import flags
import datasets
import model
@@ -35,9 +35,17 @@ logging.basicConfig(
    datefmt='%Y-%m-%d %H:%M:%S')
_common_flags_defined = False
def define():
"""Define common flags."""
# yapf: disable
# common_flags.define() may be called multiple times in unit tests.
global _common_flags_defined
if _common_flags_defined:
return
_common_flags_defined = True
flags.DEFINE_integer('batch_size', 32,
    'Batch size.')
@@ -74,7 +82,7 @@ def define():
    'the optimizer to use')
flags.DEFINE_float('momentum', 0.9,
    'momentum value for the momentum optimizer if used')
flags.DEFINE_bool('use_augment_input', True,
    'If True will use image augmentation')
......
@@ -144,9 +144,6 @@ def preprocess_image(image, augment=False, central_crop_size=None,
images = [augment_image(img) for img in images]
image = tf.concat(images, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.5)
return image
......
@@ -177,6 +177,8 @@ def get_split(split_name, dataset_dir=None, config=None):
items_to_descriptions=config['items_to_descriptions'],
# additional parameters for convenience.
charset=charset,
charset_file=charset_file,
image_shape=config['image_shape'],
num_char_classes=len(charset),
num_of_views=config['num_of_views'],
max_sequence_length=config['max_sequence_length'],
......
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build the Attention OCR model.
Usage example:
@@ -26,6 +25,7 @@ Usage example:
import sys
import collections
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import inception
@@ -35,29 +35,28 @@ import sequence_layers
import utils
OutputEndpoints = collections.namedtuple('OutputEndpoints', [
    'chars_logit', 'chars_log_prob', 'predicted_chars', 'predicted_scores',
    'predicted_text', 'predicted_length', 'predicted_conf',
    'normalized_seq_conf'
])
# TODO(gorban): replace with tf.HParams when it is released.
ModelParams = collections.namedtuple(
    'ModelParams', ['num_char_classes', 'seq_length', 'num_views', 'null_code'])
ConvTowerParams = collections.namedtuple('ConvTowerParams', ['final_endpoint'])
SequenceLogitsParams = collections.namedtuple('SequenceLogitsParams', [
    'use_attention', 'use_autoregression', 'num_lstm_units', 'weight_decay',
    'lstm_state_clip_value'
])
SequenceLossParams = collections.namedtuple(
    'SequenceLossParams',
    ['label_smoothing', 'ignore_nulls', 'average_across_timesteps'])
EncodeCoordinatesParams = collections.namedtuple('EncodeCoordinatesParams',
    ['enabled'])
def _dict_to_array(id_to_char, default_character):
@@ -85,16 +84,16 @@ class CharsetMapper(object):
"""
mapping_strings = tf.constant(_dict_to_array(charset, default_character))
self.table = tf.contrib.lookup.index_to_string_table_from_tensor(
    mapping=mapping_strings, default_value=default_character)
def get_text(self, ids):
"""Returns a string corresponding to a sequence of character ids.
Args:
ids: a tensor with shape [batch_size, max_sequence_length]
"""
return tf.reduce_join(
    self.table.lookup(tf.to_int64(ids)), reduction_indices=1)
def get_softmax_loss_fn(label_smoothing):
@@ -111,16 +110,152 @@ def get_softmax_loss_fn(label_smoothing):
def loss_fn(labels, logits):
return (tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=labels))
else:
def loss_fn(labels, logits):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels)
return loss_fn
def get_tensor_dimensions(tensor):
"""Returns the shape components of a 4D tensor with variable batch size.
Args:
tensor : A 4D tensor, whose last 3 dimensions are known at graph
construction time.
Returns:
batch_size : The first dimension as a tensor object.
height : The second dimension as a scalar value.
width : The third dimension as a scalar value.
num_features : The fourth dimension as a scalar value.
Raises:
ValueError: if input tensor does not have 4 dimensions.
"""
if len(tensor.get_shape().dims) != 4:
raise ValueError(
'Incompatible shape: len(tensor.get_shape().dims) != 4 (%d != 4)' %
len(tensor.get_shape().dims))
batch_size = tf.shape(tensor)[0]
height = tensor.get_shape().dims[1].value
width = tensor.get_shape().dims[2].value
num_features = tensor.get_shape().dims[3].value
return batch_size, height, width, num_features
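# Example sketch (hypothetical shapes): for a tensor of shape
# [None, 18, 60, 288], get_tensor_dimensions returns
# (batch_size_tensor, 18, 60, 288); the batch size stays a dynamic tensor
# while the spatial and feature dimensions come back as Python ints.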
def lookup_indexed_value(indices, row_vecs):
"""Lookup values in each row of 'row_vecs' indexed by 'indices'.
For each sample in the batch, look up the element for the corresponding
index.
Args:
indices : A tensor of shape (batch, )
row_vecs : A tensor of shape [batch, depth]
Returns:
A tensor of shape (batch, ) formed by row_vecs[i, indices[i]].
"""
gather_indices = tf.stack((tf.range(
tf.shape(row_vecs)[0], dtype=tf.int32), tf.cast(indices, tf.int32)),
axis=1)
return tf.gather_nd(row_vecs, gather_indices)
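# Example sketch: with indices=[1, 0] and row_vecs=[[a, b], [c, d]],
# gather_indices is [[0, 1], [1, 0]] and the result is [b, c].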
@utils.ConvertAllInputsToTensors
def max_char_logprob_cumsum(char_log_prob):
"""Computes the cumulative sum of character logprob for all sequence lengths.
Args:
char_log_prob: A tensor of shape [batch x seq_length x num_char_classes]
with log probabilities of a character.
Returns:
A tensor of shape [batch x (seq_length+1)] where each element x[_, j] is
the sum of the max char logprob for all positions up to j.
Note this duplicates the final column and produces (seq_length+1) columns
so the same function can be used regardless of whether
use_length_predictions is true or false.
"""
max_char_log_prob = tf.reduce_max(char_log_prob, reduction_indices=2)
# For an input array [a, b, c], tf.cumsum returns [a, a + b, a + b + c] if
# exclusive is set to False (the default).
return tf.cumsum(max_char_log_prob, axis=1, exclusive=False)
def find_length_by_null(predicted_chars, null_code):
"""Determine sequence length by finding null_code among predicted char IDs.
Given the char class ID for each position, compute the sequence length.
Note that the length is computed from the count of non-null characters,
not from the position of the first null_code.
Args:
predicted_chars: A tensor of [batch x seq_length] where each element stores
the char class ID with max probability;
null_code: an int32, character id for the NULL.
Returns:
A [batch, ] tensor which stores the sequence length for each sample.
"""
return tf.reduce_sum(
tf.cast(tf.not_equal(null_code, predicted_chars), tf.int32), axis=1)
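# Example sketch: with null_code=2 and predicted_chars=[[0, 1, 2, 2]],
# two positions differ from the null code, so the returned length is [2].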
def axis_pad(tensor, axis, before=0, after=0, constant_values=0.0):
"""Pad a tensor with the specified values along a single axis.
Args:
tensor: a Tensor;
axis: the dimension along which to pad;
before: number of values to add before the contents of tensor in the
selected dimension;
after: number of values to add after the contents of tensor in the selected
dimension;
constant_values: the scalar pad value to use. Must be same type as tensor.
Returns:
A Tensor. Has the same type as the input tensor, but with a changed shape
along the specified dimension.
"""
if before == 0 and after == 0:
return tensor
ndims = tensor.shape.ndims
padding_size = np.zeros((ndims, 2), dtype='int32')
padding_size[axis] = before, after
return tf.pad(
tensor=tensor,
paddings=tf.constant(padding_size),
constant_values=constant_values)
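# Example sketch: axis_pad(t, axis=1, after=1) on a [batch, seq, classes]
# tensor appends one all-zero slice along the sequence axis, producing a
# [batch, seq+1, classes] tensor.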
def null_based_length_prediction(chars_log_prob, null_code):
"""Computes length and confidence of prediction based on positions of NULLs.
Args:
chars_log_prob: A tensor of shape [batch x seq_length x num_char_classes]
with log probabilities of a character;
null_code: an int32, character id for the NULL.
Returns:
A tuple (text_log_prob, predicted_length), where
text_log_prob is a tensor of shape [batch x (seq_length+1)].
Element #0 of the output corresponds to the probability of the empty
string, and element #seq_length is the probability of length=seq_length.
predicted_length is a tensor with shape [batch].
"""
predicted_chars = tf.to_int32(tf.argmax(chars_log_prob, axis=2))
# We right-pad to support sequences with seq_length elements.
text_log_prob = max_char_logprob_cumsum(
axis_pad(chars_log_prob, axis=1, after=1))
predicted_length = find_length_by_null(predicted_chars, null_code)
return text_log_prob, predicted_length
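# Note: in Model.create_base below, predicted_conf is obtained by indexing
# text_log_prob with predicted_length, i.e. the cumulative max-char logprob
# up to and including the first terminating position.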
class Model(object):
"""Class to create the Attention OCR Model."""
@@ -137,24 +272,24 @@ class Model(object):
num_char_classes: size of character set.
seq_length: number of characters in a sequence.
num_views: Number of views (conv towers) to use.
null_code: A character code corresponding to a character which indicates
    the end of a sequence.
mparams: a dictionary with hyper parameters for methods; keys - function
    names, values - corresponding namedtuples.
charset: an optional dictionary with a mapping between character ids and
    utf8 strings. If specified, OutputEndpoints.predicted_text will contain
    utf8-encoded strings corresponding to the character ids returned by
    OutputEndpoints.predicted_chars (by default predicted_text contains
    an empty vector).
    NOTE: Make sure you call tf.tables_initializer().run() if the charset
    is specified.
"""
super(Model, self).__init__()
self._params = ModelParams(
    num_char_classes=num_char_classes,
    seq_length=seq_length,
    num_views=num_views,
    null_code=null_code)
self._mparams = self.default_mparams()
if mparams:
self._mparams.update(mparams)
@@ -162,21 +297,22 @@ class Model(object):
def default_mparams(self):
return {
    'conv_tower_fn':
        ConvTowerParams(final_endpoint='Mixed_5d'),
    'sequence_logit_fn':
        SequenceLogitsParams(
            use_attention=True,
            use_autoregression=True,
            num_lstm_units=256,
            weight_decay=0.00004,
            lstm_state_clip_value=10.0),
    'sequence_loss_fn':
        SequenceLossParams(
            label_smoothing=0.1,
            ignore_nulls=True,
            average_across_timesteps=False),
    'encode_coordinates_fn':
        EncodeCoordinatesParams(enabled=False)
}
def set_mparam(self, function, **kwargs):
@@ -205,7 +341,7 @@ class Model(object):
with slim.arg_scope([slim.batch_norm, slim.dropout],
    is_training=is_training):
net, _ = inception.inception_v3_base(
    images, final_endpoint=mparams.final_endpoint)
return net
def _create_lstm_inputs(self, net):
@@ -222,10 +358,10 @@ class Model(object):
"""
num_features = net.get_shape().dims[1].value
if num_features < self._params.seq_length:
raise AssertionError(
    'Incorrect dimension #1 of input tensor'
    ' %d should be bigger than %d (shape=%s)' %
    (num_features, self._params.seq_length, net.get_shape()))
elif num_features > self._params.seq_length:
logging.warning('Ignoring some features: use %d of %d (shape=%s)',
    self._params.seq_length, num_features, net.get_shape())
@@ -252,7 +388,7 @@ class Model(object):
A tensor with the same size as any input tensors.
"""
batch_size, height, width, num_features = [
    d.value for d in nets_list[0].get_shape().dims
]
xy_flat_shape = (batch_size, 1, height * width, num_features)
nets_for_merge = []
@@ -261,7 +397,7 @@ class Model(object):
nets_for_merge.append(tf.reshape(net, xy_flat_shape))
merged_net = tf.concat(nets_for_merge, 1)
net = slim.max_pool2d(
    merged_net, kernel_size=[len(nets_list), 1], stride=1)
net = tf.reshape(net, (batch_size, height, width, num_features))
return net
@@ -279,16 +415,17 @@ class Model(object):
"""
with tf.variable_scope('pool_views_fn/STCK'):
net = tf.concat(nets, 1)
batch_size = tf.shape(net)[0]
image_size = net.get_shape().dims[1].value * net.get_shape().dims[2].value
feature_size = net.get_shape().dims[3].value
return tf.reshape(net, tf.stack([batch_size, image_size, feature_size]))
def char_predictions(self, chars_logit):
"""Returns confidence scores (softmax values) for predicted characters.
Args:
chars_logit: chars logits, a tensor with shape [batch_size x seq_length x
    num_char_classes]
Returns:
A tuple (ids, log_prob, scores), where:
@@ -303,10 +440,13 @@ class Model(object):
log_prob = utils.logits_to_log_prob(chars_logit)
ids = tf.to_int32(tf.argmax(log_prob, axis=2), name='predicted_chars')
mask = tf.cast(
    slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
all_scores = tf.nn.softmax(chars_logit)
selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
scores = tf.reshape(
    selected_scores,
    shape=(-1, self._params.seq_length),
    name='predicted_scores')
return ids, log_prob, scores
def encode_coordinates_fn(self, net):
@@ -323,12 +463,12 @@ class Model(object):
"""
mparams = self._mparams['encode_coordinates_fn']
if mparams.enabled:
batch_size, h, w, _ = get_tensor_dimensions(net)
x, y = tf.meshgrid(tf.range(w), tf.range(h))
w_loc = slim.one_hot_encoding(x, num_classes=w)
h_loc = slim.one_hot_encoding(y, num_classes=h)
loc = tf.concat([h_loc, w_loc], 2)
loc = tf.tile(tf.expand_dims(loc, 0), tf.stack([batch_size, 1, 1, 1]))
return tf.concat([net, loc], 3)
else:
return net
@@ -341,7 +481,8 @@ class Model(object):
"""Creates a base part of the Model (no gradients, losses or summaries).
Args:
images: A tensor of shape [batch_size, height, width, channels] with pixel
    values in the range [0.0, 1.0].
labels_one_hot: Optional (can be None) one-hot encoding for ground truth
    labels. If provided the function will create a model for training.
scope: Optional variable_scope.
@@ -353,14 +494,19 @@ class Model(object):
"""
logging.debug('images: %s', images)
is_training = labels_one_hot is not None
# Normalize image pixel values to have a symmetrical range around zero.
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.5)
with tf.variable_scope(scope, reuse=reuse):
views = tf.split(
    value=images, num_or_size_splits=self._params.num_views, axis=2)
logging.debug('Views=%d single view: %s', len(views), views[0])
nets = [
    self.conv_tower_fn(v, is_training, reuse=(i != 0))
    for i, v in enumerate(views)
]
logging.debug('Conv tower: %s', nets[0])
@@ -374,18 +520,34 @@ class Model(object):
logging.debug('chars_logit: %s', chars_logit)
predicted_chars, chars_log_prob, predicted_scores = (
    self.char_predictions(chars_logit))
if self._charset:
character_mapper = CharsetMapper(self._charset)
predicted_text = character_mapper.get_text(predicted_chars)
else:
predicted_text = tf.constant([])
text_log_prob, predicted_length = null_based_length_prediction(
chars_log_prob, self._params.null_code)
predicted_conf = lookup_indexed_value(predicted_length, text_log_prob)
# Convert predicted confidence from a sum of log probabilities to a
# geometric mean: exp(sum(log p) / n) == (prod p) ** (1 / n).
normalized_seq_conf = tf.exp(
tf.divide(predicted_conf,
tf.cast(predicted_length + 1, predicted_conf.dtype)),
name='normalized_seq_conf')
predicted_conf = tf.identity(predicted_conf, name='predicted_conf')
predicted_text = tf.identity(predicted_text, name='predicted_text')
predicted_length = tf.identity(predicted_length, name='predicted_length')
return OutputEndpoints(
    chars_logit=chars_logit,
    chars_log_prob=chars_log_prob,
    predicted_chars=predicted_chars,
    predicted_scores=predicted_scores,
    predicted_length=predicted_length,
    predicted_text=predicted_text,
    predicted_conf=predicted_conf,
    normalized_seq_conf=normalized_seq_conf)
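# Note: the tf.identity names above ('predicted_conf', 'predicted_text',
# 'predicted_length', 'normalized_seq_conf') give these endpoints stable
# graph names under the model scope (e.g. 'AttentionOcr_v1/predicted_text:0'),
# which model_export_test.py relies on when loading the exported model.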
def create_loss(self, data, endpoints):
"""Creates all losses required to train the model.
@@ -413,15 +575,15 @@ class Model(object):
Uses the same method as in https://arxiv.org/abs/1512.00567.
Args:
chars_labels: ground truth ids of characters, shape=[batch_size,
    seq_length];
weight: label-smoothing regularization weight.
Returns:
A tensor with the same shape as the input.
"""
one_hot_labels = tf.one_hot(
    chars_labels, depth=self._params.num_char_classes, axis=-1)
pos_weight = 1.0 - weight
neg_weight = weight / self._params.num_char_classes
return one_hot_labels * pos_weight + neg_weight
@@ -433,10 +595,10 @@ class Model(object):
also ignore all null chars after the first one.
Args:
chars_logits: logits for predicted characters, shape=[batch_size,
    seq_length, num_char_classes];
chars_labels: ground truth ids of characters, shape=[batch_size,
    seq_length];
mparams: method hyper parameters.
Returns:
@@ -446,7 +608,7 @@ class Model(object):
with tf.variable_scope('sequence_loss_fn/SLF'):
if mparams.label_smoothing > 0:
smoothed_one_hot_labels = self.label_smoothing_regularization(
    chars_labels, mparams.label_smoothing)
labels_list = tf.unstack(smoothed_one_hot_labels, axis=1)
else:
# NOTE: in case of sparse softmax we are not using one-hot
@@ -459,20 +621,20 @@ class Model(object):
else:
# Suppose that reject character is the last in the charset.
reject_char = tf.constant(
    self._params.num_char_classes - 1,
    shape=(batch_size, seq_length),
    dtype=tf.int64)
known_char = tf.not_equal(chars_labels, reject_char)
weights = tf.to_float(known_char)
logits_list = tf.unstack(chars_logits, axis=1)
weights_list = tf.unstack(weights, axis=1)
loss = tf.contrib.legacy_seq2seq.sequence_loss(
    logits_list,
    labels_list,
    weights_list,
    softmax_loss_function=get_softmax_loss_fn(mparams.label_smoothing),
    average_across_timesteps=mparams.average_across_timesteps)
tf.losses.add_loss(loss)
return loss
@@ -482,8 +644,8 @@ class Model(object):
Args:
data: InputEndpoints namedtuple.
endpoints: OutputEndpoints namedtuple.
charset: A dictionary with mapping between character codes and unicode
    characters. Use the one provided by a dataset.charset.
is_training: If True will create summary prefixes for training job,
    otherwise - for evaluation.
@@ -507,7 +669,7 @@ class Model(object):
if is_training:
tf.summary.image(
    sname('image/orig'), data.images_orig, max_outputs=max_outputs)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
return None
@@ -520,32 +682,35 @@ class Model(object):
names_to_values[name] = value_update_tuple[0]
names_to_updates[name] = value_update_tuple[1]
use_metric(
    'CharacterAccuracy',
    metrics.char_accuracy(
        endpoints.predicted_chars,
        data.labels,
        streaming=True,
        rej_char=self._params.null_code))
# Sequence accuracy computed by cutting sequence at the first null char
use_metric(
    'SequenceAccuracy',
    metrics.sequence_accuracy(
        endpoints.predicted_chars,
        data.labels,
        streaming=True,
        rej_char=self._params.null_code))
for name, value in names_to_values.items():
summary_name = 'eval/' + name
tf.summary.scalar(summary_name, tf.Print(value, [value], summary_name))
return list(names_to_updates.values())
def create_init_fn_to_restore(self,
    master_checkpoint,
    inception_checkpoint=None):
"""Creates init operations to restore weights from various checkpoints.
Args:
master_checkpoint: path to a checkpoint which contains all weights for the
    whole model.
inception_checkpoint: path to a checkpoint which contains weights for the
    inception part only.
@@ -556,8 +721,8 @@ class Model(object):
all_feed_dict = {}
def assign_from_checkpoint(variables, checkpoint):
logging.info('Request to restore %d weights from %s', len(variables),
    checkpoint)
if not variables:
logging.error('Can\'t find any variables to restore.')
sys.exit(1)
@@ -565,15 +730,18 @@ class Model(object):
all_assign_ops.append(assign_op)
all_feed_dict.update(feed_dict)
logging.info('variables_to_restore:\n%s',
    utils.variables_to_restore().keys())
logging.info('moving_average_variables:\n%s',
    [v.op.name for v in tf.moving_average_variables()])
logging.info('trainable_variables:\n%s',
    [v.op.name for v in tf.trainable_variables()])
if master_checkpoint:
assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)
if inception_checkpoint:
variables = utils.variables_to_restore(
    'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
assign_from_checkpoint(variables, inception_checkpoint)
def init_assign_fn(sess):
......
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts existing checkpoint into a SavedModel.
Usage example:
python model_export.py \
--logtostderr --checkpoint=model.ckpt-399731 \
--export_dir=/tmp/attention_ocr_export
"""
import os
import tensorflow as tf
from tensorflow import app
from tensorflow.contrib import slim
from tensorflow.python.platform import flags
import common_flags
import model_export_lib
FLAGS = flags.FLAGS
common_flags.define()
flags.DEFINE_string('export_dir', None, 'Directory to export model files to.')
flags.DEFINE_integer(
'image_width', None,
'Image width used during training (or crop width if used).'
' If not set, the dataset default is used instead.')
flags.DEFINE_integer(
'image_height', None,
'Image height used during training (or crop height if used).'
' If not set, the dataset default is used instead.')
flags.DEFINE_string('work_dir', '/tmp', 'A directory to store temporary files.')
flags.DEFINE_integer('version_number', 1, 'Version number of the model')
flags.DEFINE_bool(
'export_for_serving', True,
'Whether the exported model accepts serialized tf.Example '
'protos as input')
def get_checkpoint_path():
"""Returns a path to a checkpoint based on specified commandline flags.
In order to specify a full path to a checkpoint use --checkpoint flag.
Alternatively, if --train_log_dir was specified it will return a path to the
most recent checkpoint.
Raises:
ValueError: in case it can't find a checkpoint.
Returns:
A string.
"""
if FLAGS.checkpoint:
return FLAGS.checkpoint
else:
model_save_path = tf.train.latest_checkpoint(FLAGS.train_log_dir)
if not model_save_path:
raise ValueError('Can\'t find a checkpoint in: %s' % FLAGS.train_log_dir)
return model_save_path
def export_model(export_dir,
export_for_serving,
batch_size=None,
crop_image_width=None,
crop_image_height=None):
"""Exports a model to the named directory.
Note that --dataset_name and --checkpoint are required and parsed by the
underlying module common_flags.
Args:
export_dir: The output dir where model is exported to.
export_for_serving: If True, expects a serialized image as input and
    attaches image normalization as part of the exported graph.
batch_size: For non-serving export, the input batch_size needs to be
specified.
crop_image_width: Width of the input image. Uses the dataset default if
None.
crop_image_height: Height of the input image. Uses the dataset default if
None.
Returns:
Returns the model signature_def.
"""
# Dataset object used only to get all parameters for the model.
dataset = common_flags.create_dataset(split_name='test')
model = common_flags.create_model(
dataset.num_char_classes,
dataset.max_sequence_length,
dataset.num_of_views,
dataset.null_code,
charset=dataset.charset)
dataset_image_height, dataset_image_width, image_depth = dataset.image_shape
# Check that the charset file exists so the export fails early and clearly.
if not os.path.exists(dataset.charset_file):
raise ValueError('No charset file found at {}: export will fail'.format(
    dataset.charset_file))
# Default to dataset dimensions, otherwise use provided dimensions.
image_width = crop_image_width or dataset_image_width
image_height = crop_image_height or dataset_image_height
if export_for_serving:
images_orig = tf.placeholder(
tf.string, shape=[batch_size], name='tf_example')
images_orig_float = model_export_lib.generate_tfexample_image(
images_orig,
image_height,
image_width,
image_depth,
name='float_images')
else:
images_shape = (batch_size, image_height, image_width, image_depth)
images_orig = tf.placeholder(
tf.uint8, shape=images_shape, name='original_image')
images_orig_float = tf.image.convert_image_dtype(
images_orig, dtype=tf.float32, name='float_images')
endpoints = model.create_base(images_orig_float, labels_one_hot=None)
sess = tf.Session()
saver = tf.train.Saver(slim.get_variables_to_restore(), sharded=True)
saver.restore(sess, get_checkpoint_path())
tf.logging.info('Model restored successfully.')
# Create model signature.
if export_for_serving:
input_tensors = {
tf.saved_model.signature_constants.CLASSIFY_INPUTS: images_orig
}
else:
input_tensors = {'images': images_orig}
signature_inputs = model_export_lib.build_tensor_info(input_tensors)
# NOTE: Tensors 'images_float' and 'chars_logit' are used by the inference
# code or to compute saliency maps.
output_tensors = {
'images_float': images_orig_float,
'predictions': endpoints.predicted_chars,
'scores': endpoints.predicted_scores,
'chars_logit': endpoints.chars_logit,
'predicted_length': endpoints.predicted_length,
'predicted_text': endpoints.predicted_text,
'predicted_conf': endpoints.predicted_conf,
'normalized_seq_conf': endpoints.normalized_seq_conf
}
for i, t in enumerate(
model_export_lib.attention_ocr_attention_masks(
dataset.max_sequence_length)):
output_tensors['attention_mask_%d' % i] = t
signature_outputs = model_export_lib.build_tensor_info(output_tensors)
signature_def = tf.saved_model.signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME)
# Save model.
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def
},
main_op=tf.tables_initializer(),
strip_default_attrs=True)
builder.save()
tf.logging.info('Model has been exported to %s' % export_dir)
return signature_def
def main(unused_argv):
if os.path.exists(FLAGS.export_dir):
raise ValueError('export_dir already exists: exporting will fail')
export_model(FLAGS.export_dir, FLAGS.export_for_serving, FLAGS.batch_size,
FLAGS.image_width, FLAGS.image_height)
if __name__ == '__main__':
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('export_dir')
app.run(main)
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for exporting Attention OCR model."""
import tensorflow as tf
# Function borrowed from research/object_detection/core/preprocessor.py
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
range to the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width,
channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.cast(image, dtype=tf.float32)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
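# Example sketch: generate_tfexample_image below calls this as
# normalize_image(image, 0.0, 255.0, 0.0, 1.0), i.e. it rescales pixel
# values linearly from the [0, 255] range to [0, 1].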
def generate_tfexample_image(input_example_strings,
image_height,
image_width,
image_channels,
name=None):
"""Parses a 1D tensor of serialized tf.Example protos and returns image batch.
Args:
input_example_strings: A 1-Dimensional tensor of size [batch_size] and type
tf.string containing a serialized Example proto per image.
image_height: First image dimension.
image_width: Second image dimension.
image_channels: Third image dimension.
name: optional tensor name.
Returns:
A tensor with shape [batch_size, height, width, channels] of type float32
with values in the range [0..1]
"""
batch_size = tf.shape(input_example_strings)[0]
images_shape = tf.stack(
[batch_size, image_height, image_width, image_channels])
tf_example_image_key = 'image/encoded'
feature_configs = {
tf_example_image_key:
tf.FixedLenFeature(
image_height * image_width * image_channels, dtype=tf.float32)
}
feature_tensors = tf.parse_example(input_example_strings, feature_configs)
float_images = tf.reshape(
normalize_image(
feature_tensors[tf_example_image_key],
original_minval=0.0,
original_maxval=255.0,
target_minval=0.0,
target_maxval=1.0),
images_shape,
name=name)
return float_images
def attention_ocr_attention_masks(num_characters):
# TODO(gorban): use tensors directly after replacing LSTM unroll methods.
prefix = ('AttentionOcr_v1/'
'sequence_logit_fn/SQLR/LSTM/attention_decoder/Attention_0')
names = ['%s/Softmax:0' % (prefix)]
for i in range(1, num_characters):
names += ['%s_%d/Softmax:0' % (prefix, i)]
return [tf.get_default_graph().get_tensor_by_name(n) for n in names]
def build_tensor_info(tensor_dict):
return {
k: tf.saved_model.utils.build_tensor_info(t)
for k, t in tensor_dict.items()
}
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_export."""
import os
import numpy as np
from absl.testing import flagsaver
import tensorflow as tf
import common_flags
import model_export
_CHECKPOINT = 'model.ckpt-399731'
_CHECKPOINT_URL = (
'http://download.tensorflow.org/models/attention_ocr_2017_08_09.tar.gz')
def _clean_up():
tf.gfile.DeleteRecursively(tf.test.get_temp_dir())
def _create_tf_example_string(image):
"""Create a serialized tf.Example proto for feeding the model."""
example = tf.train.Example()
example.features.feature['image/encoded'].float_list.value.extend(
list(np.reshape(image, (-1))))
return example.SerializeToString()
class AttentionOcrExportTest(tf.test.TestCase):
"""Tests for model_export.export_model."""
def setUp(self):
for suffix in ['.meta', '.index', '.data-00000-of-00001']:
filename = _CHECKPOINT + suffix
self.assertTrue(
tf.gfile.Exists(filename),
msg='Missing checkpoint file %s. '
'Please download and extract it from %s' %
(filename, _CHECKPOINT_URL))
tf.flags.FLAGS.dataset_name = 'fsns'
tf.flags.FLAGS.checkpoint = _CHECKPOINT
tf.flags.FLAGS.dataset_dir = os.path.join(
os.path.dirname(__file__), 'datasets/testdata/fsns')
tf.test.TestCase.setUp(self)
_clean_up()
self.export_dir = os.path.join(tf.test.get_temp_dir(), 'exported_model')
self.minimal_output_signature = {
'predictions': 'AttentionOcr_v1/predicted_chars:0',
'scores': 'AttentionOcr_v1/predicted_scores:0',
'predicted_length': 'AttentionOcr_v1/predicted_length:0',
'predicted_text': 'AttentionOcr_v1/predicted_text:0',
'predicted_conf': 'AttentionOcr_v1/predicted_conf:0',
'normalized_seq_conf': 'AttentionOcr_v1/normalized_seq_conf:0'
}
def create_input_feed(self, graph_def, serving):
"""Returns the input feed for the model.
Creates random images, according to the size specified by dataset_name,
formats them in the correct way depending on whether the model was exported
for serving, and returns the correctly keyed feed_dict for inference.
Args:
graph_def: Graph definition of the loaded model.
serving: Whether the model was exported for Serving.
Returns:
The feed_dict suitable for model inference.
"""
# Creates a dataset based on FLAGS.dataset_name.
self.dataset = common_flags.create_dataset('test')
# Create some random images to test inference for any dataset.
self.images = {
'img1':
np.random.uniform(low=64, high=192,
size=self.dataset.image_shape).astype('uint8'),
'img2':
np.random.uniform(low=32, high=224,
size=self.dataset.image_shape).astype('uint8'),
}
signature_def = graph_def.signature_def[
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
if serving:
input_name = signature_def.inputs[
tf.saved_model.signature_constants.CLASSIFY_INPUTS].name
# Model for serving takes input: inputs['inputs'] = 'tf_example:0'
feed_dict = {
input_name: [
_create_tf_example_string(self.images['img1']),
_create_tf_example_string(self.images['img2'])
]
}
else:
input_name = signature_def.inputs['images'].name
# Model for direct use takes input: inputs['images'] = 'original_image:0'
feed_dict = {
input_name: np.stack([self.images['img1'], self.images['img2']])
}
return feed_dict
def verify_export_load_and_inference(self, export_for_serving=False):
"""Verify exported model can be loaded and inference can run successfully.
This function will load the exported model in self.export_dir, then create
some fake images according to the specification of FLAGS.dataset_name.
It then feeds the input through the model and verifies that the minimal
set of output signatures is present.
Note: Model and dataset creation in the underlying library depends on the
following commandline flags:
FLAGS.dataset_name
Args:
export_for_serving: True if the model was exported for Serving. This
affects how input is fed into the model.
"""
tf.reset_default_graph()
sess = tf.Session()
graph_def = tf.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=self.export_dir)
feed_dict = self.create_input_feed(graph_def, export_for_serving)
results = sess.run(self.minimal_output_signature, feed_dict=feed_dict)
out_shape = (2,)
self.assertEqual(np.shape(results['predicted_conf']), out_shape)
self.assertEqual(np.shape(results['predicted_text']), out_shape)
self.assertEqual(np.shape(results['predicted_length']), out_shape)
self.assertEqual(np.shape(results['normalized_seq_conf']), out_shape)
out_shape = (2, self.dataset.max_sequence_length)
self.assertEqual(np.shape(results['scores']), out_shape)
self.assertEqual(np.shape(results['predictions']), out_shape)
@flagsaver.flagsaver
def test_fsns_export_for_serving_and_load_inference(self):
model_export.export_model(self.export_dir, True)
self.verify_export_load_and_inference(True)
@flagsaver.flagsaver
def test_fsns_export_and_load_inference(self):
model_export.export_model(self.export_dir, False, batch_size=2)
self.verify_export_load_and_inference(False)
if __name__ == '__main__':
tf.test.main()
@@ -12,11 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the model."""
import string
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
@@ -32,6 +31,7 @@ def create_fake_charset(num_char_classes):
class ModelTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
@@ -51,18 +51,21 @@ class ModelTest(tf.test.TestCase):
self.chars_logit_shape = (self.batch_size, self.seq_length,
    self.num_char_classes)
self.length_logit_shape = (self.batch_size, self.seq_length + 1)
# Placeholder knows image dimensions, but not batch size.
self.input_images = tf.placeholder(
tf.float32,
shape=(None, self.image_height, self.image_width, 3),
name='input_node')
self.initialize_fakes()
def initialize_fakes(self):
self.images_shape = (self.batch_size, self.image_height, self.image_width,
    3)
self.fake_images = self.rng.randint(
    low=0, high=255, size=self.images_shape).astype('float32')
self.fake_conv_tower_np = self.rng.randn(*self.conv_tower_shape).astype(
    'float32')
self.fake_conv_tower = tf.constant(self.fake_conv_tower_np)
self.fake_logits = tf.constant(
    self.rng.randn(*self.chars_logit_shape).astype('float32'))
@@ -74,33 +77,44 @@ class ModelTest(tf.test.TestCase):
def create_model(self, charset=None):
return model.Model(
    self.num_char_classes,
    self.seq_length,
    num_views=4,
    null_code=62,
    charset=charset)
def test_char_related_shapes(self):
charset = create_fake_charset(self.num_char_classes)
ocr_model = self.create_model(charset=charset)
with self.test_session() as sess:
endpoints_tf = ocr_model.create_base(
    images=self.input_images, labels_one_hot=None)
sess.run(tf.global_variables_initializer())
tf.tables_initializer().run()
endpoints = sess.run(
    endpoints_tf, feed_dict={self.input_images: self.fake_images})
self.assertEqual(
    (self.batch_size, self.seq_length, self.num_char_classes),
    endpoints.chars_logit.shape)
self.assertEqual(
    (self.batch_size, self.seq_length, self.num_char_classes),
    endpoints.chars_log_prob.shape)
self.assertEqual((self.batch_size, self.seq_length),
    endpoints.predicted_chars.shape)
self.assertEqual((self.batch_size, self.seq_length),
    endpoints.predicted_scores.shape)
self.assertEqual((self.batch_size,), endpoints.predicted_text.shape)
self.assertEqual((self.batch_size,), endpoints.predicted_conf.shape)
self.assertEqual((self.batch_size,), endpoints.normalized_seq_conf.shape)
def test_predicted_scores_are_within_range(self):
ocr_model = self.create_model()
_, _, scores = ocr_model.char_predictions(self.fake_logits)
with self.test_session() as sess:
scores_np = sess.run(
    scores, feed_dict={self.input_images: self.fake_images})
values_in_range = (scores_np >= 0.0) & (scores_np <= 1.0)
self.assertTrue(
@@ -111,10 +125,11 @@ class ModelTest(tf.test.TestCase):
def test_conv_tower_shape(self):
with self.test_session() as sess:
ocr_model = self.create_model()
conv_tower = ocr_model.conv_tower_fn(self.input_images)
sess.run(tf.global_variables_initializer())
conv_tower_np = sess.run(
    conv_tower, feed_dict={self.input_images: self.fake_images})
self.assertEqual(self.conv_tower_shape, conv_tower_np.shape)
@@ -124,11 +139,12 @@ class ModelTest(tf.test.TestCase):
# updates, gradients and variances. It also depends on the type of used
# optimizer.
ocr_model = self.create_model()
ocr_model.create_base(images=self.input_images, labels_one_hot=None)
with self.test_session() as sess:
tfprof_root = tf.profiler.profile(
    sess.graph,
    options=tf.profiler.ProfileOptionBuilder
    .trainable_variables_parameter())
model_size_bytes = 4 * tfprof_root.total_parameters
self.assertLess(model_size_bytes, 1 * 2**30)
...@@ -158,7 +174,7 @@ class ModelTest(tf.test.TestCase):
    loss = model.sequence_loss_fn(self.fake_logits, self.fake_labels)
    with self.test_session() as sess:
      loss_np = sess.run(loss, feed_dict={self.input_images: self.fake_images})

    # This test checks that the loss function is 'runnable'.
    self.assertEqual(loss_np.shape, tuple())
...@@ -172,19 +188,20 @@ class ModelTest(tf.test.TestCase):
    Returns:
      a list of tensors with encoded image coordinates in them.
    """
    batch_size = tf.shape(net)[0]
    _, h, w, _ = net.shape.as_list()
    h_loc = [
        tf.tile(
            tf.reshape(
                tf.contrib.layers.one_hot_encoding(
                    tf.constant([i]), num_classes=h), [h, 1]), [1, w])
        for i in range(h)
    ]
    h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
    w_loc = [
        tf.tile(
            tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
            [h, 1]) for i in range(w)
    ]
    w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
    loc = tf.concat([h_loc, w_loc], 2)
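To make the encoding concrete: each spatial cell of the feature map is tagged with a one-hot row index (length h) concatenated with a one-hot column index (length w), adding h + w channels. A NumPy sketch for a tiny 2×3 grid (an illustration of the result, not the TF helper above):

```
import numpy as np

h, w = 2, 3
coords = np.zeros((h, w, h + w), dtype=np.float32)
for i in range(h):
  for j in range(w):
    coords[i, j, i] = 1.0      # first h channels: one-hot row index
    coords[i, j, h + j] = 1.0  # last w channels: one-hot column index

# Cell (1, 2) encodes row 1 and column 2.
assert coords[1, 2].tolist() == [0.0, 1.0, 0.0, 0.0, 1.0]
```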
...@@ -197,11 +214,12 @@ class ModelTest(tf.test.TestCase):
    conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower)
    with self.test_session() as sess:
      conv_w_coords = sess.run(
          conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})

    batch_size, height, width, feature_size = self.conv_tower_shape
    self.assertEqual(conv_w_coords.shape,
                     (batch_size, height, width, feature_size + height + width))
  def test_disabled_coordinate_encoding_returns_features_unchanged(self):
    model = self.create_model()
...@@ -209,7 +227,8 @@ class ModelTest(tf.test.TestCase):
    conv_w_coords_tf = model.encode_coordinates_fn(self.fake_conv_tower)
    with self.test_session() as sess:
      conv_w_coords = sess.run(
          conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})

    self.assertAllEqual(conv_w_coords, self.fake_conv_tower_np)
...@@ -221,7 +240,8 @@ class ModelTest(tf.test.TestCase):
    conv_w_coords_tf = model.encode_coordinates_fn(fake_conv_tower)
    with self.test_session() as sess:
      conv_w_coords = sess.run(
          conv_w_coords_tf, feed_dict={self.input_images: self.fake_images})

    # Original features
    self.assertAllEqual(conv_w_coords[0, :, :, :4],
...@@ -261,10 +281,11 @@ class ModelTest(tf.test.TestCase):
class CharsetMapperTest(tf.test.TestCase):

  def test_text_corresponds_to_ids(self):
    charset = create_fake_charset(36)
    ids = tf.constant([[17, 14, 21, 21, 24], [32, 24, 27, 21, 13]],
                      dtype=tf.int64)
    charset_mapper = model.CharsetMapper(charset)
    with self.test_session() as sess:
...
...@@ -111,7 +111,7 @@ class SequenceLayerBase(object):
    self._mparams = method_params
    self._net = net
    self._labels_one_hot = labels_one_hot
    self._batch_size = tf.shape(net)[0]

    # Initialize parameters for char logits which will be computed on the fly
    # inside an LSTM decoder.
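This one-line change is the crux of the export support: `net.get_shape().dims[0].value` reads the static shape, which is `None` when the batch dimension is left undefined for serving, while `tf.shape(net)[0]` is evaluated at run time and always yields the actual batch size. A small sketch of the difference (placeholder shape chosen arbitrarily):

```
import numpy as np
import tensorflow as tf

net = tf.placeholder(tf.float32, shape=[None, 16, 16, 8])

static_batch = net.get_shape().dims[0].value  # None: unknown at graph build time
dynamic_batch = tf.shape(net)[0]              # scalar tensor, known at run time

with tf.Session() as sess:
  print(static_batch)  # -> None
  print(sess.run(dynamic_batch,
                 feed_dict={net: np.zeros((4, 16, 16, 8), np.float32)}))  # -> 4
```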
...@@ -275,7 +275,7 @@ class NetSlice(SequenceLayerBase):
  def __init__(self, *args, **kwargs):
    super(NetSlice, self).__init__(*args, **kwargs)
    self._zero_label = tf.zeros(
        tf.stack([self._batch_size, self._params.num_char_classes]))

  def get_image_feature(self, char_index):
    """Returns a subset of image features for a character.
...@@ -352,7 +352,7 @@ class Attention(SequenceLayerBase):
  def __init__(self, *args, **kwargs):
    super(Attention, self).__init__(*args, **kwargs)
    self._zero_label = tf.zeros(
        tf.stack([self._batch_size, self._params.num_char_classes]))

  def get_eval_input(self, prev, i):
    """See SequenceLayerBase.get_eval_input for details."""
...
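Both `_zero_label` changes follow from the dynamic batch size: a Python list `[self._batch_size, num_char_classes]` is no longer a valid shape argument once `self._batch_size` is a tensor, so `tf.stack` packs the dynamic and static dimensions into a 1-D shape tensor that `tf.zeros` accepts. A minimal sketch of the pattern (example dimensions assumed):

```
import numpy as np
import tensorflow as tf

net = tf.placeholder(tf.float32, shape=[None, 10])
batch_size = tf.shape(net)[0]  # dynamic dimension (a scalar tensor)
num_char_classes = 63          # static dimension (example value)

# tf.stack builds the shape tensor [batch_size, 63] at run time.
zero_label = tf.zeros(tf.stack([batch_size, num_char_classes]))

with tf.Session() as sess:
  out = sess.run(zero_label, feed_dict={net: np.zeros((5, 10), np.float32)})
  assert out.shape == (5, 63)
```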
...@@ -78,3 +78,20 @@ def variables_to_restore(scope=None, strip_scope=False):
    return variable_map
  else:
    return {v.op.name: v for v in slim.get_variables_to_restore()}


def ConvertAllInputsToTensors(func):
  """A decorator to convert all function's inputs into tensors.

  Args:
    func: a function to decorate.

  Returns:
    A decorated function.
  """

  def FuncWrapper(*args):
    tensors = [tf.convert_to_tensor(a) for a in args]
    return func(*tensors)

  return FuncWrapper
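For context, a hedged usage sketch (the decorated function below is hypothetical, not part of this commit): applying `ConvertAllInputsToTensors` lets a function written against tensors also accept NumPy arrays or plain Python lists.

```
import numpy as np
import tensorflow as tf

@ConvertAllInputsToTensors
def elementwise_sum(a, b):
  # Both arguments arrive as tensors regardless of what the caller passed.
  return a + b

with tf.Session() as sess:
  # A float32 NumPy array and a Python list are converted transparently.
  total = elementwise_sum(np.array([1.0, 2.0], np.float32), [3.0, 4.0])
  print(sess.run(total))  # -> [4. 6.]
```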