# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Input pipeline for the transformer model to read, filter, and batch examples.

Two things to note in the pipeline:

1. Batching scheme

   The examples encoded in the TFRecord files contain data in the format:
     {"inputs": [variable length array of integers],
      "targets": [variable length array of integers]}
   Where integers in the arrays refer to tokens in the English and German vocab
   file (named `vocab.ende.32768`).

   Prior to batching, elements in the dataset are grouped by length (max between
   "inputs" and "targets" length). Each group is then batched such that:
     group_batch_size * length <= batch_size.

   Another way to view batch_size is the maximum number of tokens in each batch.

   Once batched, each element in the dataset will have the shape:
     {"inputs": [group_batch_size, padded_input_length],
      "targets": [group_batch_size, padded_target_length]}
   Lengths are padded to the longest "inputs" or "targets" sequence in the batch
   (padded_input_length and padded_target_length can be different).

   This batching scheme decreases the fraction of padding tokens per training
   batch, thus improving the training speed significantly.
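
   For example, with batch_size=2048, a group padded to length 64 can hold at
   most 2048 // 64 = 32 examples per batch, while a group padded to length 32
   can hold up to 2048 // 32 = 64 examples.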

2. Shuffling

   While training, the dataset is shuffled in two places in the code. The first
   is the list of training files. Second, records are read with a parallel
   `interleave` whose options set `experimental_deterministic = False`, which
   randomizes the order in which examples are produced.
"""

import os

from absl import logging
import tensorflow as tf

from official.utils.misc import model_helpers

# Buffer size for reading records from a TFRecord file. Each training file is
# 7.2 MB, so 8 MB allows an entire file to be kept in memory.
_READ_RECORD_BUFFER = 8 * 1000 * 1000

# Example grouping constants. Defines length boundaries for each group.
# These values are the defaults used in Tensor2Tensor.
_MIN_BOUNDARY = 8
_BOUNDARY_SCALE = 1.1


def _load_records(filename):
  """Read file and return a dataset of tf.Examples."""
  return tf.data.TFRecordDataset(filename, buffer_size=_READ_RECORD_BUFFER)


def _parse_example(serialized_example):
  """Return inputs and targets Tensors from a serialized tf.Example."""
  data_fields = {
      "inputs": tf.io.VarLenFeature(tf.int64),
      "targets": tf.io.VarLenFeature(tf.int64)
  }
  parsed = tf.io.parse_single_example(serialized_example, data_fields)
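  # VarLenFeature fields are parsed as tf.SparseTensors; densify them into
  # 1-D int64 tensors of token ids.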
  inputs = tf.sparse.to_dense(parsed["inputs"])
  targets = tf.sparse.to_dense(parsed["targets"])
  return inputs, targets


def _filter_max_length(example, max_length=256):
  """Indicates whether the example's length is lower than the maximum length."""
  return tf.logical_and(
      tf.size(example[0]) <= max_length,
      tf.size(example[1]) <= max_length)


def _get_example_length(example):
  """Returns the maximum length between the example inputs and targets."""
  length = tf.maximum(tf.shape(example[0])[0], tf.shape(example[1])[0])
  return length


def _create_min_max_boundaries(max_length,
                               min_boundary=_MIN_BOUNDARY,
                               boundary_scale=_BOUNDARY_SCALE):
  """Create min and max boundary lists up to max_length.

  For example, when max_length=24, min_boundary=4 and boundary_scale=2, the
  returned values will be:
    buckets_min = [0, 4, 8, 16]
    buckets_max = [4, 8, 16, 25]

  Args:
    max_length: The maximum length of example in dataset.
    min_boundary: Minimum length in boundary.
    boundary_scale: Amount to scale consecutive boundaries in the list.

  Returns:
    min and max boundary lists

  """
  # Create bucket boundaries list by scaling the previous boundary or adding 1
  # (to ensure increasing boundary sizes).
  bucket_boundaries = []
  x = min_boundary
  while x < max_length:
    bucket_boundaries.append(x)
    x = max(x + 1, int(x * boundary_scale))

  # Create min and max boundary lists from the initial list.
  buckets_min = [0] + bucket_boundaries
  buckets_max = bucket_boundaries + [max_length + 1]
  return buckets_min, buckets_max


def _batch_examples(dataset, batch_size, max_length):
  """Group examples by similar lengths, and return batched dataset.

  Each batch of similar-length examples is padded to the same length, and
  batches may contain different numbers of elements, such that:
    group_batch_size * padded_length <= batch_size.

  This decreases the number of padding tokens per batch, which improves the
  training speed.

  Args:
    dataset: Dataset of unbatched examples.
    batch_size: Max number of tokens per batch of examples.
    max_length: Max number of tokens in an example input or target sequence.

  Returns:
    Dataset of batched examples with similar lengths.
  """
  # Get the min and max boundary lists. They are used to calculate the
  # `bucket_id` of each example, which is the index at which:
  # buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id]
  # Note that using both min and max lists improves the performance.
  buckets_min, buckets_max = _create_min_max_boundaries(max_length)

  # Create list of batch sizes for each bucket_id, so that
  # bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size
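  # For example, with batch_size=4096, the bucket whose max boundary is 64
  # gets a batch size of 4096 // 64 = 64 examples.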
  bucket_batch_sizes = [int(batch_size) // x for x in buckets_max]
  # bucket_id will be a tensor, so convert this list to a tensor as well.
  bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)

  def example_to_bucket_id(example_input, example_target):
    """Return int64 bucket id for this example, calculated based on length."""
    seq_length = _get_example_length((example_input, example_target))

    # TODO(xunkai): investigate if removing code branching improves performance.
    conditions_c = tf.logical_and(
        tf.less_equal(buckets_min, seq_length),
        tf.less(seq_length, buckets_max))
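    # Exactly one entry of conditions_c is True because the bucket ranges
    # partition [0, max_length]; tf.where returns its index and tf.reduce_min
    # collapses it to a scalar bucket id.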
    bucket_id = tf.reduce_min(tf.where(conditions_c))
    return bucket_id

  def window_size_fn(bucket_id):
    """Return number of examples to be grouped when given a bucket id."""
    return bucket_batch_sizes[bucket_id]

  def batching_fn(bucket_id, grouped_dataset):
    """Batch and add padding to a dataset of elements with similar lengths."""
    bucket_batch_size = window_size_fn(bucket_id)

    # Batch the dataset and add padding so that all input sequences in the
    # examples have the same length, and all target sequences have the same
    # lengths as well. Resulting lengths of inputs and targets can differ.
    return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None]))
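
  # `group_by_window` keys each example by `example_to_bucket_id` and batches
  # each bucket separately, using the per-bucket size from `window_size_fn`
  # (hence window_size=None below).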

  return dataset.apply(
      tf.data.experimental.group_by_window(
          key_func=example_to_bucket_id,
          reduce_func=batching_fn,
          window_size=None,
          window_size_func=window_size_fn))


def _read_and_batch_from_files(file_pattern,
                               batch_size,
                               max_length,
                               max_io_parallelism,
                               shuffle,
                               repeat,
                               static_batch=False,
                               num_replicas=1,
                               ctx=None):
  """Create dataset where each item is a dict of "inputs" and "targets".

  Args:
    file_pattern: String used to match the input TFRecord files.
    batch_size: Maximum number of tokens per global batch of examples.
    max_length: Maximum number of tokens per example.
    max_io_parallelism: Max number of CPU cores for parallel input processing.
    shuffle: If true, randomizes order of elements.
    repeat: Number of times to repeat the dataset. If None, the dataset is
      repeated forever.
    static_batch: Whether the batches in the dataset should have static shapes.
      If True, the input is batched so that every batch has the shape
      [batch_size // max_length, max_length] (e.g. batch_size=4096 and
      max_length=256 give batches of shape [16, 256]). If False, the input is
      grouped by length and batched so that batches may have different shapes
      [N, M], where N * M <= batch_size and M <= max_length. In general, this
      setting should be False: dynamic shapes let the inputs be grouped so
      that the number of padding tokens is minimized, which helps model
      training. In cases where the input shape must be static (e.g. when
      running on a TPU), this setting should be set to True.
    num_replicas: Number of GPUs or other workers. We will generate global
      batches, and each global batch is evenly divisible by the number of
      replicas. Currently it is only effective when static_batch==True.
      TODO: make it effective when static_batch=False.
    ctx: Input context.

  Returns:
    tf.data.Dataset object containing examples loaded from the files.
  """
  dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle)

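  # When the dataset is consumed by multiple input pipelines (e.g. under a
  # distribution strategy), read only this pipeline's shard of the files.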
  if ctx and ctx.num_input_pipelines > 1:
    logging.info("Shard %d of the dataset.", ctx.input_pipeline_id)
    dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)

  # Read files and interleave results. When training, the order of the examples
  # will be non-deterministic.
  options = tf.data.Options()
  options.experimental_deterministic = False
  dataset = dataset.interleave(
      _load_records,
      cycle_length=max_io_parallelism,
      num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)

  # Parse each serialized tf.Example into a pair of (inputs, targets) tensors.
  # TODO: Look into prefetch_input_elements for performance optimization.  # pylint: disable=g-bad-todo
  dataset = dataset.map(
      _parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  # Remove examples where the input or target length exceeds the maximum length.
  dataset = dataset.filter(lambda x, y: _filter_max_length((x, y), max_length))

  if static_batch:
    dataset = dataset.padded_batch(
        # First compute the per-replica batch size in tokens, convert it to a
        # number of sentences, then expand back to a global batch. This keeps
        # the global batch evenly divisible by the number of replicas for the
        # distribution strategy.
        int(batch_size // num_replicas // max_length * num_replicas),
        ([max_length], [max_length]),
        drop_remainder=True)
  else:
    # Group and batch such that each batch has examples of similar length.
    # TODO(xunkai): _batch_examples might need to do something special for
    # num_replicas.
    dataset = _batch_examples(dataset, batch_size, max_length)

  dataset = dataset.repeat(repeat)

  # Prefetch the next element to improve speed of input pipeline.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
  return dataset


def _generate_synthetic_data(params):
  """Create synthetic data based on the parameter batch size."""
  batch_size = int(params["batch_size"] // params["max_length"])
  length = params["max_length"]
  dataset = model_helpers.generate_synthetic_data(
      input_shape=tf.TensorShape([length]),
      input_value=1,
      input_dtype=tf.int64,
      label_shape=tf.TensorShape([length]),
      label_value=1,
      label_dtype=tf.int64,
  )
  if params["static_batch"]:
    dataset = dataset.batch(batch_size, drop_remainder=True)
  else:
    dataset = dataset.padded_batch(batch_size, ([None], [None]))
  return dataset


def train_input_fn(params, ctx=None):
  """Load and return dataset of batched examples for use during training."""
  file_pattern = os.path.join(params["data_dir"] or "", "*train*")
  if params["use_synthetic_data"]:
    return _generate_synthetic_data(params)
  return _read_and_batch_from_files(
      file_pattern,
      params["batch_size"],
      params["max_length"],
      params["max_io_parallelism"],
      shuffle=True,
      repeat=params["repeat_dataset"],
      static_batch=params["static_batch"],
      num_replicas=params["num_gpus"],
      ctx=ctx)
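

# A minimal usage sketch (illustrative values only; the real `params` dict is
# built elsewhere from the Transformer flags/config, so these numbers are
# assumptions, not defaults):
#
#   params = {
#       "data_dir": "/path/to/tfrecords",
#       "batch_size": 4096,  # maximum number of tokens per global batch
#       "max_length": 256,
#       "max_io_parallelism": 16,
#       "repeat_dataset": None,
#       "static_batch": False,
#       "num_gpus": 1,
#       "use_synthetic_data": False,
#   }
#   dataset = train_input_fn(params)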


def eval_input_fn(params, ctx=None):
  """Load and return dataset of batched examples for use during evaluation."""
  file_pattern = os.path.join(params["data_dir"] or "", "*dev*")
  if params["use_synthetic_data"]:
    return _generate_synthetic_data(params)
  return _read_and_batch_from_files(
      file_pattern,
      params["batch_size"],
      params["max_length"],
      params["max_io_parallelism"],
      shuffle=False,
      repeat=1,
      static_batch=params["static_batch"],
      num_replicas=params["num_gpus"],
      ctx=ctx)


def map_data_for_transformer_fn(x, y):
  """Maps data for training, and handles weried behaviors for different vers."""
  # Will transform input x and targets y into tuple(x, y) as new model inputs.
  # For TF v2, the 2nd parameter is omitted to make Keras training work.
  return ((x, y),)