# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.

Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.

Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import tensorflow as tf

from tensorflow.contrib import data as tf_data
from object_detection.builders import decoder_builder
from object_detection.protos import input_reader_pb2

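# A minimal sketch of the InputReader configuration consumed by `build`, in
# text proto form. Only fields that this module reads are shown, and the input
# path is illustrative; see input_reader.proto for the full set of options.
#
#   tf_record_input_reader {
#     input_path: "/data/train.record-?????-of-00100"
#   }
#   shuffle: true
#   num_readers: 4
#   num_epochs: 0  # 0 (the default) repeats the dataset indefinitely.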

def make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  This is useful in cases where make_one_shot_iterator wouldn't work because
  the graph contains a hash table that needs to be initialized.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = dataset.make_initializable_iterator()
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator

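# Illustrative TF1-style usage; `input_reader_config` and the session handling
# below are assumptions, not part of this module:
#
#   dataset = build(input_reader_config, batch_size=32)
#   iterator = make_initializable_iterator(dataset)
#   tensors = iterator.get_next()
#   with tf.Session() as sess:
#     # tf.tables_initializer() also runs the iterator initializer that was
#     # registered in the TABLE_INITIALIZERS collection above.
#     sess.run(tf.tables_initializer())
#     batch = sess.run(tensors)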

def read_dataset(file_read_func, input_files, config,
                 filename_shard_fn=None):
  """Reads a dataset, and handles repetition and shuffling.

  Args:
    file_read_func: Function to use in tf_data.parallel_interleave, to
      read every individual file into a tf.data.Dataset.
    input_files: A list of file paths to read.
    config: An input_reader_pb2.InputReader object.
    filename_shard_fn: optional, A function used to shard filenames across
      replicas. This function takes as input a TF dataset of filenames and
      is expected to return its sharded version. It is useful when the
      dataset is being loaded on one of possibly many replicas and we want
      to evenly shard the files between the replicas.

  Returns:
    A tf.data.Dataset of (undecoded) tf-records based on config.

  Raises:
    RuntimeError: If no files are found at the supplied path(s).
  """
  # Shard, shuffle, and read files.
  filenames = tf.gfile.Glob(input_files)
  if not filenames:
    raise RuntimeError('Did not find any input files matching the glob pattern '
                       '{}'.format(input_files))
  num_readers = config.num_readers
  if num_readers > len(filenames):
    num_readers = len(filenames)
    tf.logging.warning('num_readers has been reduced to %d to match input file '
                       'shards.' % num_readers)
  filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
  if config.shuffle:
    filename_dataset = filename_dataset.shuffle(
        config.filenames_shuffle_buffer_size)
  elif num_readers > 1:
    tf.logging.warning('`shuffle` is false, but the input data stream is '
                       'still slightly shuffled since `num_readers` > 1.')
  if filename_shard_fn:
    filename_dataset = filename_shard_fn(filename_dataset)

  filename_dataset = filename_dataset.repeat(config.num_epochs or None)
  records_dataset = filename_dataset.apply(
      tf_data.parallel_interleave(
          file_read_func,
          cycle_length=num_readers,
          block_length=config.read_block_length,
          sloppy=config.shuffle))
  if config.shuffle:
    records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
  return records_dataset

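# Illustrative stand-alone use, mirroring how `build` calls this function; the
# glob pattern and the `input_reader_config` proto are assumptions:
#
#   serialized_records = read_dataset(
#       functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
#       ['/data/train.record-*'],
#       input_reader_config)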

def shard_function_for_context(input_context):
  """Returns a function that shards filenames based on the input context."""

  if input_context is None:
    return None

  def shard_fn(dataset):
    return dataset.shard(
        input_context.num_input_pipelines, input_context.input_pipeline_id)

  return shard_fn

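# Illustrative sketch: under tf.distribute each replica receives an
# InputContext, and the returned shard_fn keeps every num_input_pipelines-th
# filename (e.g. with 4 input pipelines, pipeline_id 1 keeps files 1, 5, 9,
# ...), so replicas read disjoint subsets of the input files:
#
#   shard_fn = shard_function_for_context(input_context)
#   if shard_fn is not None:
#     filename_dataset = shard_fn(filename_dataset)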

def build(input_reader_config, batch_size=None, transform_input_data_fn=None,
          input_context=None):
  """Builds a tf.data.Dataset.

  Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
  records. If `batch_size` is provided, batches the dataset and drops any
  final partial batch.

  Args:
    input_reader_config: An input_reader_pb2.InputReader object.
    batch_size: Batch size. If batch size is None, no batching is performed.
    transform_input_data_fn: Function to apply transformation to all records,
      or None if no extra decoding is required.
    input_context: optional, A tf.distribute.InputContext object used to
      shard filenames and compute per-replica batch_size when this function
      is being called per-replica.

  Returns:
    A tf.data.Dataset based on the input_reader_config.

  Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
  """
  if not isinstance(input_reader_config, input_reader_pb2.InputReader):
    raise ValueError('input_reader_config not of type '
                     'input_reader_pb2.InputReader.')

  decoder = decoder_builder.build(input_reader_config)

  if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    config = input_reader_config.tf_record_input_reader
    if not config.input_path:
      raise ValueError('At least one input path must be specified in '
                       '`input_reader_config`.')

    def process_fn(value):
      """Sets up tf graph that decodes, transforms and pads input data."""
      processed_tensors = decoder.decode(value)
      if transform_input_data_fn is not None:
        processed_tensors = transform_input_data_fn(processed_tensors)
      return processed_tensors

    shard_fn = shard_function_for_context(input_context)
    if input_context is not None:
      batch_size = input_context.get_per_replica_batch_size(batch_size)

    dataset = read_dataset(
        functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
        config.input_path[:], input_reader_config, filename_shard_fn=shard_fn)
    if input_reader_config.sample_1_of_n_examples > 1:
      dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0)
    # TODO(rathodv): make batch size a required argument once the old binaries
    # are deleted.
    if batch_size:
      num_parallel_calls = batch_size * input_reader_config.num_parallel_batches
    else:
      num_parallel_calls = input_reader_config.num_parallel_map_calls
    # TODO(b/123952794): Migrate to V2 function.
    if hasattr(dataset, 'map_with_legacy_function'):
      data_map_fn = dataset.map_with_legacy_function
    else:
      data_map_fn = dataset.map
    dataset = data_map_fn(process_fn, num_parallel_calls=num_parallel_calls)
    if batch_size:
      dataset = dataset.apply(
          tf_data.batch_and_drop_remainder(batch_size))
    dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
    return dataset

  raise ValueError('Unsupported input_reader_config.')
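

# As noted in the module docstring, custom InputReaders can be used with the
# Object Detection configuration framework by defining a builder that wraps
# `build`. The wrapper below is an illustrative sketch; the oneof case name
# and the helper it dispatches to are assumptions:
#
#   def my_dataset_builder(input_reader_config, batch_size=None,
#                          transform_input_data_fn=None, input_context=None):
#     if (input_reader_config.WhichOneof('input_reader') ==
#         'external_input_reader'):
#       return my_custom_build(input_reader_config)  # hypothetical helper
#     return build(input_reader_config, batch_size, transform_input_data_fn,
#                  input_context)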