"sgl-kernel/include/sgl_kernel_ops.h" did not exist on "ac2dc35d0e529a278450bceb4d234aae3a1c93d8"
eval_util.py 41.7 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
import collections
import os
import re
import time

import numpy as np
import tensorflow as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils

slim = tf.contrib.slim

# A dictionary of metric names to classes that implement the metric. The
# classes in the dictionary must implement the
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'oid_challenge_segmentation_metrics':
        object_detection_evaluation
        .OpenImagesInstanceSegmentationChallengeEvaluator,
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'precision_at_recall_detection_metrics':
        object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
}

EVAL_DEFAULT_METRIC = 'coco_detection_metrics'

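# Example lookup (illustrative sketch, not part of the original module): every
# class in EVAL_METRICS_CLASS_DICT takes the category list as its first
# constructor argument (see get_evaluators below), so the default evaluator
# can be built like this, with a hypothetical `categories` value:
#
#   categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
#   evaluator = EVAL_METRICS_CLASS_DICT[EVAL_DEFAULT_METRIC](categories)
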

def write_metrics(metrics, global_step, summary_dir):
  """Write metrics to a summary directory.

  Args:
    metrics: A dictionary containing metric names and values.
    global_step: Global step at which the metrics are computed.
    summary_dir: Directory to write tensorflow summaries to.
  """
  tf.logging.info('Writing metrics to tf summary.')
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  for key in sorted(metrics):
    summary = tf.Summary(value=[
        tf.Summary.Value(tag=key, simple_value=metrics[key]),
    ])
    summary_writer.add_summary(summary, global_step)
    tf.logging.info('%s: %f', key, metrics[key])
  tf.logging.info('Metrics written to tf summary.')


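# Example usage of write_metrics (illustrative sketch; the metric name, step,
# and directory below are hypothetical):
#
#   write_metrics({'DetectionBoxes_Precision/mAP': 0.42},
#                 global_step=10000, summary_dir='/tmp/eval')
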
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on tensorboard.  It optionally also
  writes images to a directory. If an entry is missing from the label map, the
  class name shown in the visualization is "N/A".

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score, and
      for display we assume that scores are probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualization are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)

  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)

  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)

  if export_dir:
    if (keep_image_id_for_visualization_export and
        result_dict[fields.InputDataFields().key]):
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)

  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)

  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)


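# Example usage of visualize_detection_results (illustrative sketch; all values
# below are hypothetical). result_dict holds numpy arrays keyed by the standard
# field names, with boxes in absolute coordinates:
#
#   result_dict = {
#       'original_image': np.zeros((1, 480, 640, 3), dtype=np.uint8),
#       'detection_boxes': np.array([[10., 10., 100., 100.]]),
#       'detection_scores': np.array([0.9]),
#       'detection_classes': np.array([1]),
#   }
#   visualize_detection_results(
#       result_dict, tag='image-0', global_step=1000,
#       categories=[{'id': 1, 'name': 'cat'}], summary_dir='/tmp/eval')
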
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None,
                         eval_export_path=None,
                         process_metrics_fn=None):
  """Evaluates metrics defined in evaluators and returns summaries.

  This function loads the latest checkpoint in checkpoint_dirs and evaluates
  all metrics defined in evaluators. The metrics are processed in batch by the
  batch_processor.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    evaluators: a list of objects of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' image counts
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
      To skip an image, it suffices to return an empty dictionary in place of
      result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used -- a
      DetectionModel will be instantiated directly. Not used if restore_fn is
      set.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: None, or a function that takes a tf.Session object and correctly
      restores all necessary variables from the correct checkpoint file. If
      None, attempts to restore from the first directory in checkpoint_dirs.
    num_batches: the number of batches to use for evaluation.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
    save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a JSON file that contains the detection
      results.
    process_metrics_fn: a callback called with evaluation results after each
      evaluation is done.  It could be used e.g. to back up checkpoints with
      best evaluation scores, or to call an external system to update evaluation
      results in order to drive best hyper-parameter search.  Parameters are:
      int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
      str checkpoint_file path.

  Returns:
    global_step: the count of global steps.
    all_evaluator_metrics: A dictionary containing metric names and values.

  Raises:
    ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
      one element.
    ValueError: if save_graph is True and save_graph_dir is not defined.
  """
  if save_graph and not save_graph_dir:
    raise ValueError('`save_graph_dir` must be defined.')
  sess = tf.Session(master, graph=tf.get_default_graph())
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())
  sess.run(tf.tables_initializer())
  checkpoint_file = None
  if restore_fn:
    restore_fn(sess)
  else:
    if not checkpoint_dirs:
      raise ValueError('`checkpoint_dirs` must have at least one entry.')
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_file)

  if save_graph:
    tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')

  counters = {'skipped': 0, 'success': 0}
  aggregate_result_losses_dict = collections.defaultdict(list)
  with tf.contrib.slim.queues.QueueRunners(sess):
    try:
      for batch in range(int(num_batches)):
        if (batch + 1) % 100 == 0:
          tf.logging.info('Running eval ops batch %d/%d', batch + 1,
                          num_batches)
        if not batch_processor:
          try:
            if not losses_dict:
              losses_dict = {}
            result_dict, result_losses_dict = sess.run([tensor_dict,
                                                        losses_dict])
            counters['success'] += 1
          except tf.errors.InvalidArgumentError:
            tf.logging.info('Skipping image')
            counters['skipped'] += 1
            result_dict = {}
        else:
          result_dict, result_losses_dict = batch_processor(
              tensor_dict, sess, batch, counters, losses_dict=losses_dict)
        if not result_dict:
          continue
        for key, value in iter(result_losses_dict.items()):
          aggregate_result_losses_dict[key].append(value)
        for evaluator in evaluators:
          # TODO(b/65130867): Use image_id tensor once we fix the input data
          # decoders to return correct image_id.
          # TODO(akuznetsa): result_dict contains batches of images, while
          # add_single_ground_truth_image_info expects a single image. Fix
          if (isinstance(result_dict, dict) and
              fields.InputDataFields.key in result_dict and
              result_dict[fields.InputDataFields.key]):
            image_id = result_dict[fields.InputDataFields.key]
          else:
            image_id = batch
          evaluator.add_single_ground_truth_image_info(
              image_id=image_id, groundtruth_dict=result_dict)
          evaluator.add_single_detected_image_info(
              image_id=image_id, detections_dict=result_dict)
      tf.logging.info('Running eval batches done.')
    except tf.errors.OutOfRangeError:
      tf.logging.info('Done evaluating -- epoch limit reached')
    finally:
      # When done, ask the threads to stop.
      tf.logging.info('# success: %d', counters['success'])
      tf.logging.info('# skipped: %d', counters['skipped'])
      all_evaluator_metrics = {}
      if eval_export_path:
        for evaluator in evaluators:
          if isinstance(evaluator, (coco_evaluation.CocoDetectionEvaluator,
                                    coco_evaluation.CocoMaskEvaluator)):
            tf.logging.info('Started dumping to json file.')
            evaluator.dump_detections_to_json_file(
                json_output_path=eval_export_path)
            tf.logging.info('Finished dumping to json file.')
      for evaluator in evaluators:
        metrics = evaluator.evaluate()
        evaluator.clear()
        if any(key in all_evaluator_metrics for key in metrics):
          raise ValueError('Metric names between evaluators must not collide.')
        all_evaluator_metrics.update(metrics)
      global_step = tf.train.global_step(sess, tf.train.get_global_step())

      for key, value in iter(aggregate_result_losses_dict.items()):
        all_evaluator_metrics['Losses/' + key] = np.mean(value)
      if process_metrics_fn and checkpoint_file:
        m = re.search(r'model.ckpt-(\d+)$', checkpoint_file)
        if not m:
          tf.logging.error('Failed to parse checkpoint number from: %s',
                           checkpoint_file)
        else:
          checkpoint_number = int(m.group(1))
          process_metrics_fn(checkpoint_number, all_evaluator_metrics,
                             checkpoint_file)
  sess.close()
  return (global_step, all_evaluator_metrics)


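# Example of a custom batch_processor for _run_checkpoint_once (illustrative
# sketch that mirrors the default path inside the function; skipping an image
# is signalled by returning an empty result_dict):
#
#   def my_batch_processor(tensor_dict, sess, batch_index, counters,
#                          losses_dict=None):
#     try:
#       result_dict, result_losses_dict = sess.run(
#           [tensor_dict, losses_dict or {}])
#       counters['success'] += 1
#       return result_dict, result_losses_dict
#     except tf.errors.InvalidArgumentError:
#       counters['skipped'] += 1
#       return {}, {}
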
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
                            summary_dir,
                            evaluators,
                            batch_processor=None,
                            checkpoint_dirs=None,
                            variables_to_restore=None,
                            restore_fn=None,
                            num_batches=1,
                            eval_interval_secs=120,
                            max_number_of_evaluations=None,
                            max_evaluation_global_step=None,
                            master='',
                            save_graph=False,
                            save_graph_dir='',
                            losses_dict=None,
                            eval_export_path=None,
                            process_metrics_fn=None):
  """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.

  This function repeatedly loads a checkpoint and evaluates a desired
  set of tensors (provided by tensor_dict) and hands the resulting numpy
  arrays to the given evaluators, which can be used to further
  process/save/visualize the results.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    summary_dir: a directory to write metrics summaries.
    evaluators: a list of objects of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' image counts
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
    checkpoint_dirs: list of directories to load into a DetectionModel or an
      EnsembleModel if restore_fn isn't set. Also used to determine when to run
      next evaluation. Must have at least one element.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: a function that takes a tf.Session object and correctly restores
      all necessary variables from the correct checkpoint file.
    num_batches: the number of batches to use for evaluation.
    eval_interval_secs: the number of seconds between each evaluation run.
    max_number_of_evaluations: the max number of iterations of the evaluation.
      If the value is left as None the evaluation continues indefinitely.
    max_evaluation_global_step: global step when evaluation stops.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: where to save on disk the Tensorflow graph. If save_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a JSON file that contains the detection
      results.
    process_metrics_fn: a callback called with evaluation results after each
      evaluation is done.  It could be used e.g. to back up checkpoints with
      best evaluation scores, or to call an external system to update evaluation
      results in order to drive best hyper-parameter search.  Parameters are:
      int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
      str checkpoint_file path.

  Returns:
    metrics: A dictionary containing metric names and values in the latest
      evaluation.

  Raises:
    ValueError: if max_number_of_evaluations is neither None nor a positive
      number.
    ValueError: if checkpoint_dirs doesn't have at least one element.
  """
  if max_number_of_evaluations and max_number_of_evaluations <= 0:
    raise ValueError(
        '`max_number_of_evaluations` must be either None or a positive number.')
  if max_evaluation_global_step and max_evaluation_global_step <= 0:
    raise ValueError(
        '`max_evaluation_global_step` must be either None or positive.')

  if not checkpoint_dirs:
    raise ValueError('`checkpoint_dirs` must have at least one entry.')

  last_evaluated_model_path = None
  number_of_evaluations = 0
  # Ensure the final `return metrics` is defined even if evaluation never runs.
  metrics = None
  while True:
    start = time.time()
    tf.logging.info('Starting evaluation at ' + time.strftime(
        '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
    if not model_path:
      tf.logging.info('No model found in %s. Will try again in %d seconds',
                      checkpoint_dirs[0], eval_interval_secs)
    elif model_path == last_evaluated_model_path:
      tf.logging.info('Found already evaluated checkpoint. Will try again in '
                      '%d seconds', eval_interval_secs)
    else:
      last_evaluated_model_path = model_path
      global_step, metrics = _run_checkpoint_once(
          tensor_dict,
          evaluators,
          batch_processor,
          checkpoint_dirs,
          variables_to_restore,
          restore_fn,
          num_batches,
          master,
          save_graph,
          save_graph_dir,
          losses_dict=losses_dict,
          eval_export_path=eval_export_path,
          process_metrics_fn=process_metrics_fn)
      write_metrics(metrics, global_step, summary_dir)
      if (max_evaluation_global_step and
          global_step >= max_evaluation_global_step):
        tf.logging.info('Finished evaluation!')
        break
    number_of_evaluations += 1

    if (max_number_of_evaluations and
        number_of_evaluations >= max_number_of_evaluations):
      tf.logging.info('Finished evaluation!')
      break
    time_to_next_eval = start + eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)

  return metrics


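# Example usage of repeated_checkpoint_run (illustrative sketch; `tensor_dict`,
# `eval_config`, `categories`, and the paths below are hypothetical):
#
#   metrics = repeated_checkpoint_run(
#       tensor_dict=tensor_dict,
#       summary_dir='/tmp/eval',
#       evaluators=get_evaluators(eval_config, categories),
#       checkpoint_dirs=['/tmp/train'],
#       num_batches=500,
#       eval_interval_secs=300,
#       max_number_of_evaluations=1)
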
def _scale_box_to_absolute(args):
  boxes, image_shape = args
  return box_list_ops.to_absolute_coordinates(
      box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()


def _resize_detection_masks(args):
  detection_boxes, detection_masks, image_shape = args
  detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
      detection_masks, detection_boxes, image_shape[0], image_shape[1])
  return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)


def _resize_groundtruth_masks(args):
  mask, image_shape = args
  mask = tf.expand_dims(mask, 3)
  mask = tf.image.resize_images(
      mask,
      image_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  return tf.cast(tf.squeeze(mask, 3), tf.uint8)


def _scale_keypoint_to_absolute(args):
  keypoints, image_shape = args
  return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])


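# Worked example for the helpers above (illustrative): with a normalized box
# [ymin, xmin, ymax, xmax] = [0.25, 0.25, 0.5, 0.5] and image_shape
# (height, width) = (480, 640), _scale_box_to_absolute returns the pixel-space
# box [120., 160., 240., 320.], i.e. y-coordinates are scaled by the height
# and x-coordinates by the width.
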
def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.

  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.

  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
        normalized coordinates.
      'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
      'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
      'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
      'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
      'groundtruth_instance_masks': 3D int64 tensor of instance masks
        (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.

  Returns:
    A dictionary with:
    'original_image': A [1, H, W, C] uint8 image tensor.
    'key': A string tensor with image identifier.
    'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`.
    'detection_scores': [max_detections] float32 tensor of scores.
    'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
    'detection_masks': [max_detections, H, W] float32 tensor of binarized
      masks, reframed to full image masks.
    'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`. (Optional)
    'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      (Optional)
    'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
    'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
    'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
    'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
    'groundtruth_instance_masks': 3D int64 tensor of instance masks
      (Optional).

  """

  # Default to None so the batched call below is well-defined when no
  # groundtruth is provided.
  max_gt_boxes = None
  if groundtruth:
    max_gt_boxes = tf.shape(
        groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
    for gt_key in groundtruth:
      # expand groundtruth dict along the batch dimension.
      groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)

  for detection_key in detections:
    detections[detection_key] = tf.expand_dims(
        detections[detection_key][0], axis=0)

  batched_output_dict = result_dict_for_batched_example(
      image,
      tf.expand_dims(key, 0),
      detections,
      groundtruth,
      class_agnostic,
      scale_to_absolute,
      max_gt_boxes=max_gt_boxes)

  exclude_keys = [
      fields.InputDataFields.original_image,
      fields.DetectionResultFields.num_detections,
      fields.InputDataFields.num_groundtruth_boxes
  ]

  output_dict = {
      fields.InputDataFields.original_image:
          batched_output_dict[fields.InputDataFields.original_image]
  }

  for key in batched_output_dict:
    # remove the batch dimension.
    if key not in exclude_keys:
      output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
  return output_dict


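# Example usage of result_dict_for_single_example (illustrative sketch; `image`
# and `detections` are hypothetical tensors shaped as the docstring describes):
#
#   eval_dict = result_dict_for_single_example(
#       image=image,                    # [1, H, W, 3] uint8 tensor
#       key=tf.constant('image-0'),
#       detections=detections,          # from DetectionModel.postprocess()
#       groundtruth=None,
#       scale_to_absolute=True)
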
def result_dict_for_batched_example(images,
                                    keys,
                                    detections,
                                    groundtruth=None,
                                    class_agnostic=False,
                                    scale_to_absolute=False,
                                    original_image_spatial_shapes=None,
                                    true_image_shapes=None,
                                    max_gt_boxes=None):
  """Merges all detection and groundtruth information for a single example.

  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.

  Args:
    images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
    keys: A [batch_size] string tensor with image identifier.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
        of boxes, in normalized coordinates.
      'groundtruth_classes':  [batch_size, max_number_of_boxes] int64 tensor of
        1-indexed classes.
      'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
        bbox area. (Optional)
      'groundtruth_is_crowd': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_instance_masks': 4D int64 tensor of instance
        masks (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.
    original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
      used to resize the image. When set to None, the image size is retained.
    true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
      containing the size of the unpadded original_image.
    max_gt_boxes: [batch_size] tensor representing the maximum number of
      groundtruth boxes to pad.

  Returns:
    A dictionary with:
    'original_image': A [batch_size, H, W, C] uint8 image tensor.
    'original_image_spatial_shape': A [batch_size, 2] tensor containing the
      original image sizes.
    'true_image_shape': A [batch_size, 3] tensor containing the size of
      the unpadded original_image.
    'key': A [batch_size] string tensor with image identifier.
    'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
      in normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`.
    'detection_scores': [batch_size, max_detections] float32 tensor of scores.
    'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
      classes.
    'detection_masks': [batch_size, max_detections, H, W] float32 tensor of
      binarized masks, reframed to full image masks.
    'num_detections': [batch_size] int64 tensor containing number of valid
      detections.
    'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`. (Optional)
    'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
      classes. (Optional)
    'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
      area. (Optional)
    'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_instance_masks': 4D int64 tensor of instance masks
      (Optional).
    'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
      of groundtruth boxes per image.

  Raises:
    ValueError: if original_image_spatial_shapes is not a 2D int32 tensor of
      shape [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D int32 tensor of shape
      [batch_size, 3].
  """
  label_id_offset = 1  # Applying label id offset (b/63711816)

  input_data_fields = fields.InputDataFields
  if original_image_spatial_shapes is None:
    original_image_spatial_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:3], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
      raise ValueError(
          '`original_image_spatial_shape` should be a 2D tensor of shape '
          '[batch_size, 2].')

  if true_image_shapes is None:
    true_image_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:4], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    if (len(true_image_shapes.shape) != 2 or
        true_image_shapes.shape[1] != 3):
      raise ValueError('`true_image_shapes` should be a 2D tensor of '
                       'shape [batch_size, 3].')

  output_dict = {
      input_data_fields.original_image:
          images,
      input_data_fields.key:
          keys,
      input_data_fields.original_image_spatial_shape: (
          original_image_spatial_shapes),
      input_data_fields.true_image_shape:
          true_image_shapes
  }

  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes]
  detection_scores = detections[detection_fields.detection_scores]
  num_detections = tf.cast(detections[detection_fields.num_detections],
                           dtype=tf.int32)

  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes]) +
        label_id_offset)

  if scale_to_absolute:
    output_dict[detection_fields.detection_boxes] = (
        shape_utils.static_or_dynamic_map_fn(
            _scale_box_to_absolute,
            elems=[detection_boxes, original_image_spatial_shapes],
            dtype=tf.float32))
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores
  output_dict[detection_fields.num_detections] = num_detections

  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks]
    # TODO(rathodv): This should be done in model's postprocess
    # function ideally.
    output_dict[detection_fields.detection_masks] = (
        shape_utils.static_or_dynamic_map_fn(
            _resize_detection_masks,
            elems=[detection_boxes, detection_masks,
                   original_image_spatial_shapes],
            dtype=tf.uint8))

  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      output_dict[detection_fields.detection_keypoints] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_keypoint_to_absolute,
              elems=[detection_keypoints, original_image_spatial_shapes],
              dtype=tf.float32))

  if groundtruth:
    if max_gt_boxes is None:
      if input_data_fields.num_groundtruth_boxes in groundtruth:
        max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
      else:
        raise ValueError(
            'max_gt_boxes must be provided when processing batched examples.')

    if input_data_fields.groundtruth_instance_masks in groundtruth:
      masks = groundtruth[input_data_fields.groundtruth_instance_masks]
      groundtruth[input_data_fields.groundtruth_instance_masks] = (
          shape_utils.static_or_dynamic_map_fn(
              _resize_groundtruth_masks,
              elems=[masks, original_image_spatial_shapes],
              dtype=tf.uint8))

    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      output_dict[input_data_fields.groundtruth_boxes] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_box_to_absolute,
              elems=[groundtruth_boxes, original_image_spatial_shapes],
              dtype=tf.float32))

    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes

    output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes

  return output_dict


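# Example usage of result_dict_for_batched_example (illustrative sketch;
# `images`, `keys`, `detections`, `groundtruth`, and `max_gt_boxes` are
# hypothetical batched tensors shaped as the docstring describes):
#
#   eval_dict = result_dict_for_batched_example(
#       images, keys, detections, groundtruth=groundtruth,
#       max_gt_boxes=max_gt_boxes, scale_to_absolute=False)
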
def get_evaluators(eval_config, categories, evaluator_options=None):
  """Returns the evaluator class according to eval_config, valid for categories.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
    evaluator_options: A dictionary of metric names (see
      EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
      keyword arguments. For example:
      evaluator_options = {
        'coco_detection_metrics': {'include_metrics_per_category': True}
      }

  Returns:
    A list of instances of DetectionEvaluator.

  Raises:
    ValueError: if metric is not in the metric class dictionary.
  """
  evaluator_options = evaluator_options or {}
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    kwargs_dict = evaluator_options.get(eval_metric_fn_key, {})
    evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
        categories,
        **kwargs_dict))
  return evaluators_list


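# Example usage of get_evaluators (illustrative sketch; `eval_config` is a
# hypothetical eval_pb2.EvalConfig whose metrics_set includes
# 'coco_detection_metrics'):
#
#   evaluators = get_evaluators(
#       eval_config, categories,
#       evaluator_options={
#           'coco_detection_metrics': {'include_metrics_per_category': True}
#       })
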
def get_eval_metric_ops_for_evaluators(eval_config,
                                       categories,
                                       eval_dict):
  """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
    eval_dict: An evaluation dictionary, returned from
      result_dict_for_single_example().

  Returns:
    A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in `tf.estimator.EstimatorSpec`.
  """
  eval_metric_ops = {}
  evaluator_options = evaluator_options_from_eval_config(eval_config)
  evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
  for evaluator in evaluators_list:
    eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
        eval_dict))
  return eval_metric_ops


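# Example usage of get_eval_metric_ops_for_evaluators inside an Estimator
# model_fn (illustrative sketch; `eval_config`, `categories`, `eval_dict`, and
# `total_loss` are hypothetical):
#
#   eval_metric_ops = get_eval_metric_ops_for_evaluators(
#       eval_config, categories, eval_dict)
#   spec = tf.estimator.EstimatorSpec(
#       mode=tf.estimator.ModeKeys.EVAL, loss=total_loss,
#       eval_metric_ops=eval_metric_ops)
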
def evaluator_options_from_eval_config(eval_config):
  """Produces a dictionary of evaluation options for each eval metric.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.

  Returns:
    evaluator_options: A dictionary of metric names (see
      EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
      keyword arguments. For example:
      evaluator_options = {
        'coco_detection_metrics': {'include_metrics_per_category': True}
      }
  """
  eval_metric_fn_keys = eval_config.metrics_set
  evaluator_options = {}
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'):
      evaluator_options[eval_metric_fn_key] = {
          'include_metrics_per_category': (
              eval_config.include_metrics_per_category)
      }
    elif eval_metric_fn_key == 'precision_at_recall_detection_metrics':
      evaluator_options[eval_metric_fn_key] = {
          'recall_lower_bound': (eval_config.recall_lower_bound),
          'recall_upper_bound': (eval_config.recall_upper_bound)
      }
  return evaluator_options
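

# Example usage of evaluator_options_from_eval_config together with
# get_evaluators (illustrative sketch; `eval_config` and `categories` are
# hypothetical):
#
#   evaluator_options = evaluator_options_from_eval_config(eval_config)
#   evaluators = get_evaluators(eval_config, categories, evaluator_options)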