"git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "80572c8345d4404ad19e3b75bffdb84bec30ba04"
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model input function for tf-learn object detection model."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

import tensorflow as tf
from object_detection.builders import dataset_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import model_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import eval_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import ops as util_ops
from object_detection.utils import shape_utils

HASH_KEY = 'hash'
HASH_BINS = 1 << 31
SERVING_FED_EXAMPLE_KEY = 'serialized_example'

# A map of names to methods that help build the input pipeline.
INPUT_BUILDER_UTIL_MAP = {
    'dataset_build': dataset_builder.build,
    'model_build': model_builder.build,
}


def _multiclass_scores_or_one_hot_labels(multiclass_scores,
                                         groundtruth_boxes,
                                         groundtruth_classes, num_classes):
  """Returns one-hot encoding of classes when multiclass_scores is empty."""
  # Replace the groundtruth_classes tensor with the multiclass_scores tensor
  # when the latter is non-empty. If multiclass_scores is empty, fall back on
  # the groundtruth_classes tensor.
  def true_fn():
    return tf.reshape(multiclass_scores,
                      [tf.shape(groundtruth_boxes)[0], num_classes])
  def false_fn():
    return tf.one_hot(groundtruth_classes, num_classes)

  return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn)


def transform_input_data(tensor_dict,
                         model_preprocess_fn,
                         image_resizer_fn,
                         num_classes,
                         data_augmentation_fn=None,
                         merge_multiple_boxes=False,
                         retain_original_image=False,
                         use_multiclass_scores=False,
                         use_bfloat16=False,
                         retain_original_image_additional_channels=False):
  """A single function that is responsible for all input data transformations.

  Data transformation functions are applied in the following order.
  1. If key fields.InputDataFields.image_additional_channels is present in
     tensor_dict, the additional channels will be merged into
     fields.InputDataFields.image.
  2. data_augmentation_fn (optional): applied on tensor_dict.
  3. model_preprocess_fn: applied only on image tensor in tensor_dict.
  4. image_resizer_fn: applied on original image and instance mask tensor in
     tensor_dict.
  5. one_hot_encoding: applied to classes tensor in tensor_dict.
  6. merge_multiple_boxes (optional): when groundtruth boxes are exactly the
     same they can be merged into a single box with an associated k-hot class
     label.

  Args:
    tensor_dict: dictionary containing input tensors keyed by
      fields.InputDataFields.
    model_preprocess_fn: model's preprocess function to apply on image tensor.
      This function must take in a 4-D float tensor and return a 4-D
      preprocessed float tensor and a tensor containing the true image shape.
    image_resizer_fn: image resizer function to apply on groundtruth instance
      masks. This function must take a 3-D float tensor of an image and a 3-D
      tensor of instance masks and return a resized version of these along with
      the true shapes.
    num_classes: number of max classes to one-hot (or k-hot) encode the class
      labels.
    data_augmentation_fn: (optional) data augmentation function to apply on
      input `tensor_dict`.
    merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes
      and classes for a given image if the boxes are exactly the same.
    retain_original_image: (optional) whether to retain original image in the
      output dictionary.
    use_multiclass_scores: whether to use multiclass scores as class targets
      instead of one-hot encoding of `groundtruth_classes`. When
      this is True and multiclass_scores is empty, one-hot encoding of
      `groundtruth_classes` is used as a fallback.
    use_bfloat16: (optional) a bool, whether to use bfloat16 in training.
    retain_original_image_additional_channels: (optional) Whether to retain
      original image additional channels in the output dictionary.

  Returns:
    A dictionary keyed by fields.InputDataFields containing the tensors obtained
    after applying all the transformations.
  """
  out_tensor_dict = tensor_dict.copy()
  if fields.InputDataFields.multiclass_scores in out_tensor_dict:
    out_tensor_dict[
        fields.InputDataFields
        .multiclass_scores] = _multiclass_scores_or_one_hot_labels(
            out_tensor_dict[fields.InputDataFields.multiclass_scores],
            out_tensor_dict[fields.InputDataFields.groundtruth_boxes],
            out_tensor_dict[fields.InputDataFields.groundtruth_classes],
            num_classes)

  if fields.InputDataFields.groundtruth_boxes in out_tensor_dict:
    out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates(
        out_tensor_dict)
    out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict)

  if retain_original_image:
    out_tensor_dict[fields.InputDataFields.original_image] = tf.cast(
        image_resizer_fn(out_tensor_dict[fields.InputDataFields.image],
                         None)[0], tf.uint8)

  if fields.InputDataFields.image_additional_channels in out_tensor_dict:
    channels = out_tensor_dict[fields.InputDataFields.image_additional_channels]
    out_tensor_dict[fields.InputDataFields.image] = tf.concat(
        [out_tensor_dict[fields.InputDataFields.image], channels], axis=2)
    if retain_original_image_additional_channels:
      out_tensor_dict[
          fields.InputDataFields.image_additional_channels] = tf.cast(
              image_resizer_fn(channels, None)[0], tf.uint8)

  # Apply data augmentation ops.
  if data_augmentation_fn is not None:
    out_tensor_dict = data_augmentation_fn(out_tensor_dict)

  # Apply model preprocessing ops and resize instance masks.
  image = out_tensor_dict[fields.InputDataFields.image]
  preprocessed_resized_image, true_image_shape = model_preprocess_fn(
      tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))
  if use_bfloat16:
    preprocessed_resized_image = tf.cast(
        preprocessed_resized_image, tf.bfloat16)
  out_tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      preprocessed_resized_image, axis=0)
  out_tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze(
      true_image_shape, axis=0)
  if fields.InputDataFields.groundtruth_instance_masks in out_tensor_dict:
    masks = out_tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
    _, resized_masks, _ = image_resizer_fn(image, masks)
    if use_bfloat16:
      resized_masks = tf.cast(resized_masks, tf.bfloat16)
    out_tensor_dict[
        fields.InputDataFields.groundtruth_instance_masks] = resized_masks

  label_offset = 1
  zero_indexed_groundtruth_classes = out_tensor_dict[
      fields.InputDataFields.groundtruth_classes] - label_offset
  if use_multiclass_scores:
    out_tensor_dict[
        fields.InputDataFields.groundtruth_classes] = out_tensor_dict[
            fields.InputDataFields.multiclass_scores]
  else:
    out_tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
        zero_indexed_groundtruth_classes, num_classes)
  out_tensor_dict.pop(fields.InputDataFields.multiclass_scores, None)

  if fields.InputDataFields.groundtruth_confidences in out_tensor_dict:
    groundtruth_confidences = out_tensor_dict[
        fields.InputDataFields.groundtruth_confidences]
    # Map the confidences to the one-hot encoding of classes
    out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
        tf.reshape(groundtruth_confidences, [-1, 1]) *
        out_tensor_dict[fields.InputDataFields.groundtruth_classes])
  else:
    groundtruth_confidences = tf.ones_like(
        zero_indexed_groundtruth_classes, dtype=tf.float32)
    out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
        out_tensor_dict[fields.InputDataFields.groundtruth_classes])

  if merge_multiple_boxes:
    merged_boxes, merged_classes, merged_confidences, _ = (
        util_ops.merge_boxes_with_multiple_labels(
            out_tensor_dict[fields.InputDataFields.groundtruth_boxes],
            zero_indexed_groundtruth_classes,
            groundtruth_confidences,
            num_classes))
    merged_classes = tf.cast(merged_classes, tf.float32)
    out_tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes
    out_tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes
    out_tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
        merged_confidences)
  if fields.InputDataFields.groundtruth_boxes in out_tensor_dict:
    out_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape(
        out_tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]

  return out_tensor_dict
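

# Illustrative usage sketch (hypothetical helper, not referenced elsewhere in
# this module): transform_input_data is normally curried with
# functools.partial, closing over the model's preprocess function and the
# image resizer, and then mapped over decoded examples (see train_input and
# eval_input below).
def _example_transform_data_fn(model, image_resizer_fn, num_classes):
  """Returns a per-example transform function (illustrative sketch)."""
  return functools.partial(
      transform_input_data,
      model_preprocess_fn=model.preprocess,
      image_resizer_fn=image_resizer_fn,
      num_classes=num_classes,
      data_augmentation_fn=None)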


def pad_input_data_to_static_shapes(tensor_dict, max_num_boxes, num_classes,
                                    spatial_image_shape=None):
  """Pads input tensors to static shapes.

  In case num_additional_channels > 0, we assume that the additional channels
  have already been concatenated to the base image.

  Args:
    tensor_dict: Tensor dictionary of input data
    max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
      padding.
    num_classes: Number of classes in the dataset needed to compute shapes for
      padding.
    spatial_image_shape: A list of two integers of the form [height, width]
      containing expected spatial shape of the image.

  Returns:
    A dictionary keyed by fields.InputDataFields containing tensors padded or
    clipped to the static shapes described above.

  Raises:
    ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we
      detect that additional channels have not been concatenated yet.
  """

  if not spatial_image_shape or spatial_image_shape == [-1, -1]:
    height, width = None, None
  else:
    height, width = spatial_image_shape  # pylint: disable=unpacking-non-sequence

  num_additional_channels = 0
  if fields.InputDataFields.image_additional_channels in tensor_dict:
    num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[
        fields.InputDataFields.image_additional_channels].shape[2])

  # We assume that if num_additional_channels > 0, then it has already been
  # concatenated to the base image (but not the ground truth).
  num_channels = 3
  if fields.InputDataFields.image in tensor_dict:
    num_channels = shape_utils.get_dim_as_int(
        tensor_dict[fields.InputDataFields.image].shape[2])

  if num_additional_channels:
    if num_additional_channels >= num_channels:
      raise ValueError(
          'Image must be already concatenated with additional channels.')

    if (fields.InputDataFields.original_image in tensor_dict and
        shape_utils.get_dim_as_int(
            tensor_dict[fields.InputDataFields.original_image].shape[2]) ==
        num_channels):
      raise ValueError(
          'Image must be already concatenated with additional channels.')

  padding_shapes = {
      fields.InputDataFields.image: [
          height, width, num_channels
      ],
      fields.InputDataFields.original_image_spatial_shape: [2],
      fields.InputDataFields.image_additional_channels: [
          height, width, num_additional_channels
      ],
      fields.InputDataFields.source_id: [],
      fields.InputDataFields.filename: [],
      fields.InputDataFields.key: [],
      fields.InputDataFields.groundtruth_difficult: [max_num_boxes],
      fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4],
      fields.InputDataFields.groundtruth_classes: [max_num_boxes, num_classes],
      fields.InputDataFields.groundtruth_instance_masks: [
          max_num_boxes, height, width
      ],
      fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes],
      fields.InputDataFields.groundtruth_group_of: [max_num_boxes],
      fields.InputDataFields.groundtruth_area: [max_num_boxes],
      fields.InputDataFields.groundtruth_weights: [max_num_boxes],
      fields.InputDataFields.groundtruth_confidences: [
          max_num_boxes, num_classes
      ],
      fields.InputDataFields.num_groundtruth_boxes: [],
      fields.InputDataFields.groundtruth_label_types: [max_num_boxes],
      fields.InputDataFields.groundtruth_label_weights: [max_num_boxes],
      fields.InputDataFields.true_image_shape: [3],
      fields.InputDataFields.groundtruth_image_classes: [num_classes],
      fields.InputDataFields.groundtruth_image_confidences: [num_classes],
  }

  if fields.InputDataFields.original_image in tensor_dict:
    padding_shapes[fields.InputDataFields.original_image] = [
        height, width,
        shape_utils.get_dim_as_int(tensor_dict[fields.InputDataFields.
                                               original_image].shape[2])
    ]
  if fields.InputDataFields.groundtruth_keypoints in tensor_dict:
    tensor_shape = (
        tensor_dict[fields.InputDataFields.groundtruth_keypoints].shape)
    padding_shape = [max_num_boxes,
                     shape_utils.get_dim_as_int(tensor_shape[1]),
                     shape_utils.get_dim_as_int(tensor_shape[2])]
    padding_shapes[fields.InputDataFields.groundtruth_keypoints] = padding_shape
  if fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict:
    tensor_shape = tensor_dict[fields.InputDataFields.
                               groundtruth_keypoint_visibilities].shape
    padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
    padding_shapes[fields.InputDataFields.
                   groundtruth_keypoint_visibilities] = padding_shape

  padded_tensor_dict = {}
  for tensor_name in tensor_dict:
    padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(
        tensor_dict[tensor_name], padding_shapes[tensor_name])

  # Make sure that the number of groundtruth boxes now reflects the
  # padded/clipped tensors.
  if fields.InputDataFields.num_groundtruth_boxes in padded_tensor_dict:
    padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = (
        tf.minimum(
            padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
            max_num_boxes))
  return padded_tensor_dict
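

# Illustrative usage sketch (hypothetical helper and shapes, not referenced
# elsewhere in this module): padding a transformed tensor_dict so that every
# tensor has a fully static shape, as train_input and eval_input do. The
# concrete numbers below are assumptions.
def _example_pad_to_static_shapes(tensor_dict):
  """Pads to at most 100 boxes, 90 classes and 640x640 images (sketch)."""
  return pad_input_data_to_static_shapes(
      tensor_dict=tensor_dict,
      max_num_boxes=100,
      num_classes=90,
      spatial_image_shape=[640, 640])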


def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of builders/preprocessor_builder.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0)

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  include_label_weights = (fields.InputDataFields.groundtruth_weights
                           in tensor_dict)
  include_label_confidences = (fields.InputDataFields.groundtruth_confidences
                               in tensor_dict)
  include_multiclass_scores = (fields.InputDataFields.multiclass_scores in
                               tensor_dict)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_label_weights=include_label_weights,
          include_label_confidences=include_label_confidences,
          include_multiclass_scores=include_multiclass_scores,
          include_instance_masks=include_instance_masks,
          include_keypoints=include_keypoints))
  tensor_dict[fields.InputDataFields.image] = tf.squeeze(
      tensor_dict[fields.InputDataFields.image], axis=0)
  return tensor_dict
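

# Illustrative usage sketch (hypothetical helper, not referenced elsewhere in
# this module): data_augmentation_options is a list of (function, kwargs)
# tuples, usually produced by preprocessor_builder.build from the train
# config. A hand-built equivalent might look like this.
def _example_data_augmentation_options():
  """Returns a sample augmentation option list (illustrative sketch)."""
  return [
      (preprocessor.random_horizontal_flip, {}),
      (preprocessor.random_crop_image, {}),
  ]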


def _get_labels_dict(input_dict):
  """Extracts labels dict from input dict."""
  required_label_keys = [
      fields.InputDataFields.num_groundtruth_boxes,
      fields.InputDataFields.groundtruth_boxes,
      fields.InputDataFields.groundtruth_classes,
      fields.InputDataFields.groundtruth_weights,
  ]
  labels_dict = {}
  for key in required_label_keys:
    labels_dict[key] = input_dict[key]

  optional_label_keys = [
      fields.InputDataFields.groundtruth_confidences,
      fields.InputDataFields.groundtruth_keypoints,
      fields.InputDataFields.groundtruth_instance_masks,
      fields.InputDataFields.groundtruth_area,
      fields.InputDataFields.groundtruth_is_crowd,
      fields.InputDataFields.groundtruth_difficult
  ]

  for key in optional_label_keys:
    if key in input_dict:
      labels_dict[key] = input_dict[key]
  if fields.InputDataFields.groundtruth_difficult in labels_dict:
    labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast(
        labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32)
  return labels_dict


def _replace_empty_string_with_random_number(string_tensor):
  """Returns string unchanged if non-empty, and random string tensor otherwise.

  The random string is an integer between 0 and 2**63 - 1, cast to a string.

  Args:
    string_tensor: A tf.tensor of dtype string.

  Returns:
    out_string: A tf.tensor of dtype string. If string_tensor contains the empty
      string, out_string will contain a random integer cast to a string.
      Otherwise string_tensor is returned unchanged.

  """

  empty_string = tf.constant('', dtype=tf.string, name='EmptyString')

  random_source_id = tf.as_string(
      tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))

  out_string = tf.cond(
      tf.equal(string_tensor, empty_string),
      true_fn=lambda: random_source_id,
      false_fn=lambda: string_tensor)

  return out_string


def _get_features_dict(input_dict):
  """Extracts features dict from input dict."""

  source_id = _replace_empty_string_with_random_number(
      input_dict[fields.InputDataFields.source_id])

  hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
  features = {
      fields.InputDataFields.image:
          input_dict[fields.InputDataFields.image],
      HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
      fields.InputDataFields.true_image_shape:
          input_dict[fields.InputDataFields.true_image_shape],
      fields.InputDataFields.original_image_spatial_shape:
          input_dict[fields.InputDataFields.original_image_spatial_shape]
  }
  if fields.InputDataFields.original_image in input_dict:
    features[fields.InputDataFields.original_image] = input_dict[
        fields.InputDataFields.original_image]
  if fields.InputDataFields.image_additional_channels in input_dict:
    features[fields.InputDataFields.image_additional_channels] = input_dict[
        fields.InputDataFields.image_additional_channels]
  return features


def create_train_input_fn(train_config, train_input_config,
                          model_config):
  """Creates a train `input` function for `Estimator`.

  Args:
    train_config: A train_pb2.TrainConfig.
    train_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.

  Returns:
    `input_fn` for `Estimator` in TRAIN mode.
  """

  def _train_input_fn(params=None):
    return train_input(train_config, train_input_config, model_config,
                       params=params)

  return _train_input_fn
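

# Illustrative usage sketch (assumed model_fn and configs, not referenced
# elsewhere in this module): wiring the returned input_fn into a
# tf.estimator.Estimator for training.
def _example_train_estimator(train_config, train_input_config, model_config,
                             model_fn):
  """Trains an Estimator with the input_fn built above (sketch)."""
  train_input_fn = create_train_input_fn(
      train_config, train_input_config, model_config)
  estimator = tf.estimator.Estimator(model_fn=model_fn)
  estimator.train(input_fn=train_input_fn, max_steps=train_config.num_steps)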


def train_input(train_config, train_input_config,
                model_config, model=None, params=None):
  """Returns `features` and `labels` tensor dictionaries for training.

  Args:
    train_config: A train_pb2.TrainConfig.
    train_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.
    model: A pre-constructed Detection Model.
      If None, one will be created from the config.
    params: Parameter dictionary passed from the estimator.

  Returns:
    A tf.data.Dataset that holds (features, labels) tuple.

    features: Dictionary of feature tensors.
      features[fields.InputDataFields.image] is a [batch_size, H, W, C]
        float32 tensor with preprocessed images.
      features[HASH_KEY] is a [batch_size] int32 tensor representing unique
        identifiers for the images.
      features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
        int32 tensor representing the true image shapes, as preprocessed
        images could be padded.
      features[fields.InputDataFields.original_image] (optional) is a
        [batch_size, H, W, C] float32 tensor with original images.
    labels: Dictionary of groundtruth tensors.
      labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
        int32 tensor indicating the number of groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_boxes] is a
        [batch_size, num_boxes, 4] float32 tensor containing the corners of
        the groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_classes] is a
        [batch_size, num_boxes, num_classes] float32 one-hot tensor of
        classes.
      labels[fields.InputDataFields.groundtruth_weights] is a
        [batch_size, num_boxes] float32 tensor containing groundtruth weights
        for the boxes.
      -- Optional --
      labels[fields.InputDataFields.groundtruth_instance_masks] is a
        [batch_size, num_boxes, H, W] float32 tensor containing only binary
        values, which represent instance masks for objects.
      labels[fields.InputDataFields.groundtruth_keypoints] is a
        [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
        keypoints for each box.

  Raises:
    TypeError: if the `train_config`, `train_input_config` or `model_config`
      are not of the correct type.
  """
  if not isinstance(train_config, train_pb2.TrainConfig):
    raise TypeError('For training mode, the `train_config` must be a '
                    'train_pb2.TrainConfig.')
  if not isinstance(train_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `train_input_config` must be a '
                    'input_reader_pb2.InputReader.')
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise TypeError('The `model_config` must be a '
                    'model_pb2.DetectionModel.')

  if model is None:
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=True).preprocess
  else:
    model_preprocess_fn = model.preprocess

  def transform_and_pad_input_data_fn(tensor_dict):
    """Combines transform and pad operation."""
    data_augmentation_options = [
        preprocessor_builder.build(step)
        for step in train_config.data_augmentation_options
    ]
    data_augmentation_fn = functools.partial(
        augment_input_data,
        data_augmentation_options=data_augmentation_options)

    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    transform_data_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=config_util.get_number_of_classes(model_config),
        data_augmentation_fn=data_augmentation_fn,
        merge_multiple_boxes=train_config.merge_multiple_label_boxes,
        retain_original_image=train_config.retain_original_images,
        use_multiclass_scores=train_config.use_multiclass_scores,
        use_bfloat16=train_config.use_bfloat16)

    tensor_dict = pad_input_data_to_static_shapes(
        tensor_dict=transform_data_fn(tensor_dict),
        max_num_boxes=train_input_config.max_number_of_boxes,
        num_classes=config_util.get_number_of_classes(model_config),
        spatial_image_shape=config_util.get_spatial_image_size(
            image_resizer_config))
    return (_get_features_dict(tensor_dict), _get_labels_dict(tensor_dict))

  dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
      train_input_config,
      transform_input_data_fn=transform_and_pad_input_data_fn,
      batch_size=params['batch_size'] if params else train_config.batch_size)
  return dataset
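

# Illustrative usage sketch (assumed configs, not referenced elsewhere in this
# module): the dataset returned by train_input yields (features, labels)
# tuples and can be consumed with a TF1-style one-shot iterator.
def _example_iterate_train_input(train_config, train_input_config,
                                 model_config):
  """Returns one (features, labels) tensor tuple from the dataset (sketch)."""
  dataset = train_input(train_config, train_input_config, model_config)
  iterator = dataset.make_one_shot_iterator()
  return iterator.get_next()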


def create_eval_input_fn(eval_config, eval_input_config, model_config):
  """Creates an eval `input` function for `Estimator`.

  Args:
    eval_config: An eval_pb2.EvalConfig.
    eval_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.

  Returns:
    `input_fn` for `Estimator` in EVAL mode.
  """

  def _eval_input_fn(params=None):
    return eval_input(eval_config, eval_input_config, model_config,
                      params=params)

  return _eval_input_fn


def eval_input(eval_config, eval_input_config, model_config,
               model=None, params=None):
  """Returns `features` and `labels` tensor dictionaries for evaluation.

  Args:
    eval_config: An eval_pb2.EvalConfig.
    eval_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.
    model: A pre-constructed Detection Model.
      If None, one will be created from the config.
    params: Parameter dictionary passed from the estimator.

  Returns:
    A tf.data.Dataset that holds (features, labels) tuple.

    features: Dictionary of feature tensors.
      features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor
        with preprocessed images.
      features[HASH_KEY] is a [1] int32 tensor representing unique
        identifiers for the images.
      features[fields.InputDataFields.true_image_shape] is a [1, 3]
        int32 tensor representing the true image shapes, as preprocessed
        images could be padded.
      features[fields.InputDataFields.original_image] is a [1, H', W', C]
        float32 tensor with the original image.
    labels: Dictionary of groundtruth tensors.
      labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]
        float32 tensor containing the corners of the groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_classes] is a
        [1, num_boxes, num_classes] float32 one-hot tensor of classes.
      labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]
        float32 tensor containing object areas.
      labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]
        bool tensor indicating if the boxes enclose a crowd.
      labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]
        int32 tensor indicating if the boxes represent difficult instances.
      -- Optional --
      labels[fields.InputDataFields.groundtruth_instance_masks] is a
        [1, num_boxes, H, W] float32 tensor containing only binary values,
        which represent instance masks for objects.

  Raises:
    TypeError: if the `eval_config`, `eval_input_config` or `model_config`
      are not of the correct type.
  """
  params = params or {}
  if not isinstance(eval_config, eval_pb2.EvalConfig):
    raise TypeError('For eval mode, the `eval_config` must be a '
                    'eval_pb2.EvalConfig.')
  if not isinstance(eval_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `eval_input_config` must be a '
                    'input_reader_pb2.InputReader.')
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise TypeError('The `model_config` must be a '
                    'model_pb2.DetectionModel.')

  if model is None:
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=False).preprocess
  else:
    model_preprocess_fn = model.preprocess

  def transform_and_pad_input_data_fn(tensor_dict):
    """Combines transform and pad operation."""
    num_classes = config_util.get_number_of_classes(model_config)

    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)

    transform_data_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=None,
        retain_original_image=eval_config.retain_original_images,
        retain_original_image_additional_channels=
        eval_config.retain_original_image_additional_channels)
    tensor_dict = pad_input_data_to_static_shapes(
        tensor_dict=transform_data_fn(tensor_dict),
        max_num_boxes=eval_input_config.max_number_of_boxes,
        num_classes=config_util.get_number_of_classes(model_config),
        spatial_image_shape=config_util.get_spatial_image_size(
            image_resizer_config))
    return (_get_features_dict(tensor_dict), _get_labels_dict(tensor_dict))
  dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
      eval_input_config,
      batch_size=params['batch_size'] if params else eval_config.batch_size,
      transform_input_data_fn=transform_and_pad_input_data_fn)
  return dataset


def create_predict_input_fn(model_config, predict_input_config):
  """Creates a predict `input` function for `Estimator`.

  Args:
    model_config: A model_pb2.DetectionModel.
    predict_input_config: An input_reader_pb2.InputReader.

  Returns:
    `input_fn` for `Estimator` in PREDICT mode.
  """

  def _predict_input_fn(params=None):
    """Decodes serialized tf.Examples and returns `ServingInputReceiver`.

    Args:
      params: Parameter dictionary passed from the estimator.

    Returns:
      `ServingInputReceiver`.
    """
    del params
    example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example')

    num_classes = config_util.get_number_of_classes(model_config)
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=False).preprocess

    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)

    transform_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=None)

    decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=False,
        num_additional_channels=predict_input_config.num_additional_channels)
    input_dict = transform_fn(decoder.decode(example))
    images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)
    images = tf.expand_dims(images, axis=0)
    true_image_shape = tf.expand_dims(
        input_dict[fields.InputDataFields.true_image_shape], axis=0)

    return tf.estimator.export.ServingInputReceiver(
        features={
            fields.InputDataFields.image: images,
            fields.InputDataFields.true_image_shape: true_image_shape},
        receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})

  return _predict_input_fn
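

# Illustrative usage sketch (assumed estimator and export path, not referenced
# elsewhere in this module): the serving input_fn built above is typically
# handed to Estimator.export_saved_model (export_savedmodel on older TF 1.x
# releases) to produce a SavedModel that accepts serialized tf.Examples.
def _example_export_saved_model(estimator, model_config, predict_input_config,
                                export_dir):
  """Exports a SavedModel using the predict input_fn (sketch)."""
  serving_input_fn = create_predict_input_fn(
      model_config, predict_input_config)
  estimator.export_saved_model(export_dir, serving_input_fn)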