# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.export_inference_graph."""
import os
import numpy as np
import six
import tensorflow as tf
from google.protobuf import text_format

from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2

if six.PY2:
  import mock  # pylint: disable=g-import-not-at-top
else:
  from unittest import mock  # pylint: disable=g-import-not-at-top

slim = tf.contrib.slim


class FakeModel(model.DetectionModel):
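  """A fake DetectionModel that returns fixed, hard-coded detections.

  It lets these tests exercise the export pipeline without building a real
  detection architecture; the constant outputs below are what the inference
  tests assert against.
  """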

  def __init__(self, add_detection_masks=False):
    self._add_detection_masks = add_detection_masks

  def preprocess(self, inputs):
    true_image_shapes = []  # Doesn't matter for the fake model.
    return tf.identity(inputs), true_image_shapes

  def predict(self, preprocessed_inputs, true_image_shapes):
    return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}

  def postprocess(self, prediction_dict, true_image_shapes):
    with tf.control_dependencies(prediction_dict.values()):
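      # Hard-coded detections for a batch of two images; the inference tests
      # below compare against these exact values.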
      postprocessed_tensors = {
          'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
                                           [0.5, 0.5, 0.8, 0.8]],
                                          [[0.5, 0.5, 1.0, 1.0],
                                           [0.0, 0.0, 0.0, 0.0]]], tf.float32),
          'detection_scores': tf.constant([[0.7, 0.6],
                                           [0.9, 0.0]], tf.float32),
          'detection_classes': tf.constant([[0, 1],
                                            [1, 0]], tf.float32),
          'num_detections': tf.constant([2, 1], tf.float32)
      }
      if self._add_detection_masks:
        postprocessed_tensors['detection_masks'] = tf.constant(
            np.arange(64).reshape([2, 2, 4, 4]), tf.float32)
    return postprocessed_tensors

  def restore_map(self, checkpoint_path, from_detection_checkpoint):
    pass

  def loss(self, prediction_dict, true_image_shapes):
    pass


class ExportInferenceGraphTest(tf.test.TestCase):

  def _save_checkpoint_from_mock_model(self, checkpoint_path,
                                       use_moving_averages):
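    # Builds a FakeModel graph (optionally with moving averages applied to its
    # variables) and saves a checkpoint that the export tests can restore.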
    g = tf.Graph()
    with g.as_default():
      mock_model = FakeModel()
      preprocessed_inputs, true_image_shapes = mock_model.preprocess(
          tf.placeholder(tf.float32, shape=[None, None, None, 3]))
      predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
      mock_model.postprocess(predictions, true_image_shapes)
      if use_moving_averages:
        tf.train.ExponentialMovingAverage(0.0).apply()
      slim.get_or_create_global_step()
      saver = tf.train.Saver()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        saver.save(sess, checkpoint_path)

  def _load_inference_graph(self, inference_graph_path):
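    # Loads a frozen GraphDef from disk and imports it into a fresh tf.Graph.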
    od_graph = tf.Graph()
    with od_graph.as_default():
      od_graph_def = tf.GraphDef()
      with tf.gfile.GFile(inference_graph_path, mode='rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    return od_graph

  def _create_tf_example(self, image_array):
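    # Wraps `image_array` as a JPEG-encoded, serialized tf.train.Example, the
    # format consumed by the exported graph's `tf_example` input.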
    with self.test_session():
      encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
    def _bytes_feature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes_feature(encoded_image),
        'image/format': _bytes_feature(six.b('jpg')),
        'image/source_id': _bytes_feature(six.b('image_id'))
    })).SerializeToString()
    return example

  def test_export_graph_with_image_tensor_input(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def test_export_graph_with_fixed_size_image_tensor_input(self):
    input_shape = [1, 320, 320, 3]

    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(
        trained_checkpoint_prefix, use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory,
          input_shape=input_shape)
      saved_model_path = os.path.join(output_directory, 'saved_model')
      self.assertTrue(
          os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))

    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        image_tensor = od_graph.get_tensor_by_name(input_tensor_name)
        self.assertSequenceEqual(image_tensor.get_shape().as_list(),
                                 input_shape)

  def test_export_graph_with_tf_example_input(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def test_export_graph_with_encoded_image_string_input(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))

  def _get_variables_in_checkpoint(self, checkpoint_file):
    return set([
        var_name
        for var_name, _ in tf.train.list_variables(checkpoint_file)])

  def test_replace_variable_values_with_moving_averages(self):
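    # replace_variable_values_with_moving_averages should produce a checkpoint
    # where each variable's EMA value is stored under the original variable
    # name, with no *ExponentialMovingAverage entries left over.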
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    graph = tf.Graph()
    with graph.as_default():
      fake_model = FakeModel()
      preprocessed_inputs, true_image_shapes = fake_model.preprocess(
          tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3]))
      predictions = fake_model.predict(preprocessed_inputs, true_image_shapes)
      fake_model.postprocess(predictions, true_image_shapes)
      exporter.replace_variable_values_with_moving_averages(
          graph, trained_checkpoint_prefix, new_checkpoint_prefix)

    expected_variables = set(['conv2d/bias', 'conv2d/kernel'])
    variables_in_old_ckpt = self._get_variables_in_checkpoint(
        trained_checkpoint_prefix)
    self.assertIn('conv2d/bias/ExponentialMovingAverage',
                  variables_in_old_ckpt)
    self.assertIn('conv2d/kernel/ExponentialMovingAverage',
                  variables_in_old_ckpt)
    variables_in_new_ckpt = self._get_variables_in_checkpoint(
        new_checkpoint_prefix)
    self.assertTrue(expected_variables.issubset(variables_in_new_ckpt))
    self.assertNotIn('conv2d/bias/ExponentialMovingAverage',
                     variables_in_new_ckpt)
    self.assertNotIn('conv2d/kernel/ExponentialMovingAverage',
                     variables_in_new_ckpt)

  def test_export_graph_with_moving_averages(self):
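    # With use_moving_averages=True in the eval config, the exporter folds the
    # EMA values back into the original variable names before exporting.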
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = True
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      self.assertTrue(os.path.exists(os.path.join(
          output_directory, 'saved_model', 'saved_model.pb')))
    expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step'])
    actual_variables = set(
        [var_name for var_name, _ in tf.train.list_variables(output_directory)])
    self.assertTrue(expected_variables.issubset(actual_variables))

  def test_export_model_with_all_output_nodes(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph):
      inference_graph.get_tensor_by_name('image_tensor:0')
      inference_graph.get_tensor_by_name('detection_boxes:0')
      inference_graph.get_tensor_by_name('detection_scores:0')
      inference_graph.get_tensor_by_name('detection_classes:0')
      inference_graph.get_tensor_by_name('detection_masks:0')
      inference_graph.get_tensor_by_name('num_detections:0')

  def test_export_model_with_detection_only_nodes(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=False)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph):
      inference_graph.get_tensor_by_name('image_tensor:0')
      inference_graph.get_tensor_by_name('detection_boxes:0')
      inference_graph.get_tensor_by_name('detection_scores:0')
      inference_graph.get_tensor_by_name('detection_classes:0')
      inference_graph.get_tensor_by_name('num_detections:0')
      with self.assertRaises(KeyError):
        inference_graph.get_tensor_by_name('detection_masks:0')

  def test_export_and_run_inference_with_image_tensor(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
          [boxes, scores, classes, masks, num_detections],
          feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)})
      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                      [0.5, 0.5, 0.8, 0.8]],
                                     [[0.5, 0.5, 1.0, 1.0],
                                      [0.0, 0.0, 0.0, 0.0]]])
      self.assertAllClose(scores_np, [[0.7, 0.6],
                                      [0.9, 0.0]])
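      # Class ids come back shifted by one: the exporter adds a label id
      # offset of 1 to the fake model's [[0, 1], [1, 0]] classes.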
      self.assertAllClose(classes_np, [[1, 2],
                                       [2, 1]])
      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
      self.assertAllClose(num_detections_np, [2, 1])

  def _create_encoded_image_string(self, image_array_np, encoding_format):
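    # Encodes a numpy image as a JPEG or PNG byte string, matching the
    # `encoded_image_string_tensor` input type.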
    od_graph = tf.Graph()
    with od_graph.as_default():
      if encoding_format == 'jpg':
        encoded_string = tf.image.encode_jpeg(image_array_np)
      elif encoding_format == 'png':
        encoded_string = tf.image.encode_png(image_array_np)
      else:
        raise ValueError('Supports only the following formats: `jpg`, `png`')
    with self.test_session(graph=od_graph):
      return encoded_string.eval()

  def test_export_and_run_inference_with_encoded_image_string_tensor(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    jpg_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    png_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'png')
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      for image_str in [jpg_image_str, png_image_str]:
        image_str_batch_np = np.hstack([image_str] * 2)
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={image_str_tensor: image_str_batch_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])

  def test_raise_runtime_error_on_images_with_different_sizes(self):
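    # Feeding a batch of encoded images that decode to different spatial sizes
    # should fail when the exported graph tries to stack them.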
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    large_image = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    small_image = self._create_encoded_image_string(
        np.ones((2, 2, 3)).astype(np.uint8), 'jpg')

    image_str_batch_np = np.hstack([large_image, small_image])
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   'TensorArray.*shape'):
        sess.run([boxes, scores, classes, masks, num_detections],
                 feed_dict={image_str_tensor: image_str_batch_np})

  def test_export_and_run_inference_with_tf_example(self):
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    tf_example_np = np.expand_dims(self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8)), axis=0)
    with self.test_session(graph=inference_graph) as sess:
      tf_example = inference_graph.get_tensor_by_name('tf_example:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
          [boxes, scores, classes, masks, num_detections],
          feed_dict={tf_example: tf_example_np})
      self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                      [0.5, 0.5, 0.8, 0.8]],
                                     [[0.5, 0.5, 1.0, 1.0],
                                      [0.0, 0.0, 0.0, 0.0]]])
      self.assertAllClose(scores_np, [[0.7, 0.6],
                                      [0.9, 0.0]])
      self.assertAllClose(classes_np, [[1, 2],
                                       [2, 1]])
      self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
      self.assertAllClose(num_detections_np, [2, 1])

  def test_export_graph_saves_pipeline_file(self):
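    # The exporter should also write the pipeline config it was given as a
    # text-format `pipeline.config` in the output directory.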
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      expected_pipeline_path = os.path.join(
          output_directory, 'pipeline.config')
      self.assertTrue(os.path.exists(expected_pipeline_path))

      written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      with tf.gfile.GFile(expected_pipeline_path, 'r') as f:
        proto_str = f.read()
        text_format.Merge(proto_str, written_pipeline_config)
        self.assertProtoEquals(pipeline_config, written_pipeline_config)

  def test_export_saved_model_and_run_inference(self):
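    # Loads the exported SavedModel and runs inference via the tensor names
    # recorded in its 'serving_default' signature.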
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')

    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)

        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        tf_example = od_graph.get_tensor_by_name(input_tensor_name)

        boxes = od_graph.get_tensor_by_name(
            signature.outputs['detection_boxes'].name)
        scores = od_graph.get_tensor_by_name(
            signature.outputs['detection_scores'].name)
        classes = od_graph.get_tensor_by_name(
            signature.outputs['detection_classes'].name)
        masks = od_graph.get_tensor_by_name(
            signature.outputs['detection_masks'].name)
        num_detections = od_graph.get_tensor_by_name(
            signature.outputs['num_detections'].name)

        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])

  def test_export_checkpoint_and_run_inference(self):
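    # The exporter also writes an unfrozen checkpoint and meta graph; restore
    # them and run inference against the named output tensors.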
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    model_path = os.path.join(output_directory, 'model.ckpt')
    meta_graph_path = model_path + '.meta'

    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        new_saver = tf.train.import_meta_graph(meta_graph_path)
        new_saver.restore(sess, model_path)

        tf_example = od_graph.get_tensor_by_name('tf_example:0')
        boxes = od_graph.get_tensor_by_name('detection_boxes:0')
        scores = od_graph.get_tensor_by_name('detection_scores:0')
        classes = od_graph.get_tensor_by_name('detection_classes:0')
        masks = od_graph.get_tensor_by_name('detection_masks:0')
        num_detections = od_graph.get_tensor_by_name('num_detections:0')
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])


if __name__ == '__main__':
  tf.test.main()