# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model evaluator.

This file provides a generic evaluation method that can be used to evaluate a
DetectionModel.
"""

import logging
import tensorflow.compat.v1 as tf

from object_detection import eval_util
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import object_detection_evaluation

# A dictionary of metric names to classes that implement the metric. Each
# class in the dictionary must implement the
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
    # DEPRECATED: please use oid_V2_detection_metrics instead
    'open_images_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    # DEPRECATED: please use oid_challenge_detection_metrics instead
    'oid_challenge_object_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'oid_challenge_segmentation_metrics':
        object_detection_evaluation
        .OpenImagesInstanceSegmentationChallengeEvaluator,
}

EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics'
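
# A minimal sketch of how this registry is used (see also get_evaluators
# below): a metrics_set name from the eval config indexes the dictionary, and
# the resulting class is instantiated with the label map categories. The
# categories list here is a hypothetical example.
#
#   evaluator_cls = EVAL_METRICS_CLASS_DICT['coco_detection_metrics']
#   evaluator = evaluator_cls(categories=[{'id': 1, 'name': 'person'}])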


def _extract_predictions_and_losses(model,
                                    create_input_dict_fn,
                                    ignore_groundtruth=False):
  """Constructs tensorflow detection graph and returns output tensors.

  Args:
    model: model to perform predictions with.
    create_input_dict_fn: function to create input tensor dictionaries.
    ignore_groundtruth: whether groundtruth should be ignored.

  Returns:
    prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed
      by standard_fields.DetectionResultsFields) and optional groundtruth
      tensors (keyed by standard_fields.InputDataFields).
    losses_dict: A dictionary containing detection losses. This is empty when
      ignore_groundtruth is true.
  """
  input_dict = create_input_dict_fn()
  prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
  input_dict = prefetch_queue.dequeue()
  original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
  preprocessed_image, true_image_shapes = model.preprocess(
      tf.cast(original_image, dtype=tf.float32))
  prediction_dict = model.predict(preprocessed_image, true_image_shapes)
  detections = model.postprocess(prediction_dict, true_image_shapes)

  groundtruth = None
  losses_dict = {}
  if not ignore_groundtruth:
    groundtruth = {
        fields.InputDataFields.groundtruth_boxes:
            input_dict[fields.InputDataFields.groundtruth_boxes],
        fields.InputDataFields.groundtruth_classes:
            input_dict[fields.InputDataFields.groundtruth_classes],
        fields.InputDataFields.groundtruth_area:
            input_dict[fields.InputDataFields.groundtruth_area],
        fields.InputDataFields.groundtruth_is_crowd:
            input_dict[fields.InputDataFields.groundtruth_is_crowd],
        fields.InputDataFields.groundtruth_difficult:
            input_dict[fields.InputDataFields.groundtruth_difficult]
    }
    if fields.InputDataFields.groundtruth_group_of in input_dict:
      groundtruth[fields.InputDataFields.groundtruth_group_of] = (
          input_dict[fields.InputDataFields.groundtruth_group_of])
    groundtruth_masks_list = None
    if fields.DetectionResultFields.detection_masks in detections:
      groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
          input_dict[fields.InputDataFields.groundtruth_instance_masks])
      groundtruth_masks_list = [
          input_dict[fields.InputDataFields.groundtruth_instance_masks]]
    groundtruth_keypoints_list = None
    if fields.DetectionResultFields.detection_keypoints in detections:
      groundtruth[fields.InputDataFields.groundtruth_keypoints] = (
          input_dict[fields.InputDataFields.groundtruth_keypoints])
      groundtruth_keypoints_list = [
          input_dict[fields.InputDataFields.groundtruth_keypoints]]
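    # Groundtruth classes in the input dict are 1-indexed (0 is reserved for
    # background), so shift them down by one before one-hot encoding them for
    # the model.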
    label_id_offset = 1
    model.provide_groundtruth(
        [input_dict[fields.InputDataFields.groundtruth_boxes]],
        [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes]
                    - label_id_offset, depth=model.num_classes)],
        groundtruth_masks_list=groundtruth_masks_list,
        groundtruth_keypoints_list=groundtruth_keypoints_list)
    losses_dict.update(model.loss(prediction_dict, true_image_shapes))

  result_dict = eval_util.result_dict_for_single_example(
      original_image,
      input_dict[fields.InputDataFields.source_id],
      detections,
      groundtruth,
      class_agnostic=(
          fields.DetectionResultFields.detection_classes not in detections),
      scale_to_absolute=True)
  return result_dict, losses_dict
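

# A minimal usage sketch for _extract_predictions_and_losses, assuming
# `build_model` and `build_input_fn` are user-supplied helpers (hypothetical
# names, for illustration only):
#
#   tensor_dict, losses_dict = _extract_predictions_and_losses(
#       model=build_model(),
#       create_input_dict_fn=build_input_fn)
#
# tensor_dict then holds the postprocessed detections and optional groundtruth
# for a single example, and losses_dict holds the scalar detection losses.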


def get_evaluators(eval_config, categories):
  """Returns the evaluator class according to eval_config, valid for categories.

  Args:
    eval_config: evaluation configurations.
    categories: a list of categories to evaluate.

  Returns:
    A list of instances of DetectionEvaluator.

  Raises:
    ValueError: if metric is not in the metric class dictionary.
  """
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    if eval_metric_fn_key == 'oid_challenge_object_detection_metrics':
      logging.warning(
          'oid_challenge_object_detection_metrics is deprecated; '
          'use oid_challenge_detection_metrics instead'
      )
    if eval_metric_fn_key == 'open_images_V2_detection_metrics':
      logging.warning(
          'open_images_V2_detection_metrics is deprecated; '
          'use oid_V2_detection_metrics instead'
      )
    evaluators_list.append(
        EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
  return evaluators_list
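

# For example, to build the COCO detection evaluator (illustrative;
# `eval_config` is a parsed eval_pb2.EvalConfig and `categories` comes from
# the label map):
#
#   eval_config.metrics_set.extend(['coco_detection_metrics'])
#   evaluators = get_evaluators(eval_config, categories)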


def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
             checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None):
  """Evaluation function for detection models.

  Args:
    create_input_dict_fn: a function to create a tensor input dictionary.
    create_model_fn: a function that creates a DetectionModel.
    eval_config: an eval_pb2.EvalConfig protobuf.
    categories: a list of category dictionaries. Each dict in the list should
                have an integer 'id' field and string 'name' field.
    checkpoint_dir: directory containing the checkpoints to evaluate.
    eval_dir: directory to write evaluation metrics summary to.
    graph_hook_fn: Optional function that is called after the evaluation graph
      is completely built. This is helpful to perform additional changes to
      the evaluation graph, such as optimizing batch norm. The function should
      modify the default graph.
    evaluator_list: Optional list of instances of DetectionEvaluator. If not
      given, the list of evaluators is created according to the eval_config.

  Returns:
    metrics: A dictionary containing metric names and values from the latest
      run.
  """

  model = create_model_fn()

  if eval_config.ignore_groundtruth and not eval_config.export_path:
    logging.fatal('If ignore_groundtruth=True then an export_path is '
                  'required. Aborting!!!')

  tensor_dict, losses_dict = _extract_predictions_and_losses(
      model=model,
      create_input_dict_fn=create_input_dict_fn,
      ignore_groundtruth=eval_config.ignore_groundtruth)

  def _process_batch(tensor_dict, sess, batch_index, counters,
                     losses_dict=None):
    """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first eval_config.num_visualizations examples and
    visualizing detections overlaid on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.

    Returns:
      result_dict: a dictionary of numpy arrays
      result_losses_dict: a dictionary of scalar losses. This is empty if input
        losses_dict is None.
    """
    try:
      if not losses_dict:
        losses_dict = {}
      result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
      counters['success'] += 1
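    # sess.run may raise InvalidArgumentError on malformed examples; count
    # these as skipped rather than aborting the evaluation run.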
    except tf.errors.InvalidArgumentError:
      logging.info('Skipping image')
      counters['skipped'] += 1
      return {}, {}
    global_step = tf.train.global_step(sess, tf.train.get_global_step())
    if batch_index < eval_config.num_visualizations:
      tag = 'image-{}'.format(batch_index)
      eval_util.visualize_detection_results(
          result_dict,
          tag,
          global_step,
          categories=categories,
          summary_dir=eval_dir,
          export_dir=eval_config.visualization_export_dir,
          show_groundtruth=eval_config.visualize_groundtruth_boxes,
          groundtruth_box_visualization_color=eval_config.
          groundtruth_box_visualization_color,
          min_score_thresh=eval_config.min_score_threshold,
          max_num_predictions=eval_config.max_num_boxes_to_visualize,
          skip_scores=eval_config.skip_scores,
          skip_labels=eval_config.skip_labels,
          keep_image_id_for_visualization_export=eval_config.
          keep_image_id_for_visualization_export)
    return result_dict, result_losses_dict

  if graph_hook_fn:
    graph_hook_fn()

  variables_to_restore = tf.global_variables()
  global_step = tf.train.get_or_create_global_step()
  variables_to_restore.append(global_step)

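  # The EMA decay value is irrelevant here; variables_to_restore() is used
  # only to build the mapping from moving-average names to variables.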
  if eval_config.use_moving_averages:
    variable_averages = tf.train.ExponentialMovingAverage(0.0)
    variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)

  def _restore_latest_checkpoint(sess):
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    saver.restore(sess, latest_checkpoint)

  if not evaluator_list:
    evaluator_list = get_evaluators(eval_config, categories)

  metrics = eval_util.repeated_checkpoint_run(
      tensor_dict=tensor_dict,
      summary_dir=eval_dir,
      evaluators=evaluator_list,
      batch_processor=_process_batch,
      checkpoint_dirs=[checkpoint_dir],
      variables_to_restore=None,
      restore_fn=_restore_latest_checkpoint,
      num_batches=eval_config.num_examples,
      eval_interval_secs=eval_config.eval_interval_secs,
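      # Evaluate exactly once when groundtruth is ignored (export-only mode);
      # otherwise run up to max_evals times, or indefinitely if max_evals is
      # unset.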
      max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
                                 eval_config.max_evals
                                 if eval_config.max_evals else None),
      master=eval_config.eval_master,
      save_graph=eval_config.save_graph,
      save_graph_dir=(eval_dir if eval_config.save_graph else ''),
      losses_dict=losses_dict,
      eval_export_path=eval_config.export_path)

  return metrics
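

# A minimal end-to-end sketch. The model_builder call follows the
# object_detection API, but the overall wiring (configs, input function, and
# paths) is illustrative rather than a definitive recipe:
#
#   import functools
#   from object_detection.builders import model_builder
#
#   create_model_fn = functools.partial(model_builder.build,
#                                       model_config=model_config,
#                                       is_training=False)
#   metrics = evaluate(create_input_dict_fn, create_model_fn, eval_config,
#                      categories, checkpoint_dir='/path/to/train_dir',
#                      eval_dir='/path/to/eval_dir')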