# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""class YT8MFrameFeatureReader(BaseReader).

  Reads TFRecords of SequenceExamples.

  The TFRecords must contain SequenceExamples with the sparse int64 'labels'
  context feature and a fixed-length byte-quantized feature vector, obtained
  from the features in 'feature_names'. The quantized features will be mapped
  back into a range between min_quantized_value and max_quantized_value.
  See https://research.google.com/youtube8m/download.html for details.
"""
from typing import Dict

import tensorflow as tf
from official.projects.yt8m.dataloaders import utils
from official.vision.configs import video_classification as exp_cfg
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser


def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be cast
      to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.shape.as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized = tf.ensure_shape(resized, new_shape)
  return resized
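

# Illustrative sketch of `resize_axis` behavior (hypothetical shapes):
#
#   x = tf.ones([5, 128])
#   resize_axis(x, axis=0, new_size=3)  # -> shape [3, 128], truncated.
#   resize_axis(x, axis=0, new_size=8)  # -> shape [8, 128]; rows 5..7 are
#                                       #    filled with `fill_value`.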


def _process_segment_and_label(video_matrix, num_frames, contexts,
                               segment_labels, segment_size,
                               num_classes) -> Dict[str, tf.Tensor]:
  """Processes a batched Tensor of frames.

  The same parameters used in `process` should be used here.

  Args:
    video_matrix: different features concatenated into one matrix.
    num_frames: number of frames per subclip.
    contexts: context information extracted from the decoder.
    segment_labels: whether to read segment-level labels instead of
      video-level labels.
    segment_size: the segment length, in frames, used when reading segments.
    num_classes: a positive integer for the number of classes.

  Returns:
    output: dictionary containing batch information
  """
  # Partition frame-level feature matrix to segment-level feature matrix.
  batch_video_ids = None
  if segment_labels:
    start_times = contexts["segment_start_times"].values
    # Here we assume all segments that start at the same start time have the
    # same segment_size.
    uniq_start_times, seg_idxs = tf.unique(start_times, out_idx=tf.dtypes.int64)
    # Range gather matrix, e.g., [[0,1,2],[1,2,3]] for segment_size == 3.
    range_mtx = tf.expand_dims(
        uniq_start_times, axis=-1) + tf.expand_dims(
            tf.range(0, segment_size, dtype=tf.int64), axis=0)
    # Shape: [num_segment, segment_size, feature_dim].
    batch_video_matrix = tf.gather_nd(video_matrix,
                                      tf.expand_dims(range_mtx, axis=-1))
    num_segment = tf.shape(batch_video_matrix)[0]
    if "id" in contexts:
      batch_video_ids = tf.reshape(
          tf.tile([contexts["id"]], [num_segment]), (num_segment,))
    batch_frames = tf.reshape(
        tf.tile([segment_size], [num_segment]), (num_segment,))
    batch_frames = tf.cast(tf.expand_dims(batch_frames, 1), tf.float32)

    # For segment labels, not all labels are exhaustively rated, so we only
    # evaluate the rated labels.

    # Label indices for each segment, shape: [num_segment, 2].
    label_indices = tf.stack([seg_idxs, contexts["segment_labels"].values],
                             axis=-1)
    label_values = contexts["segment_scores"].values
    sparse_labels = tf.sparse.SparseTensor(label_indices, label_values,
                                           (num_segment, num_classes))
    batch_labels = tf.sparse.to_dense(sparse_labels, validate_indices=False)

    sparse_label_weights = tf.sparse.SparseTensor(
        label_indices, tf.ones_like(label_values, dtype=tf.float32),
        (num_segment, num_classes))
    batch_label_weights = tf.sparse.to_dense(
        sparse_label_weights, validate_indices=False)
    # output_dict = utils.get_segments(batch_video_matrix, batch_frames, 5)
  else:
    # Process video-level labels.
    label_indices = contexts["labels"].values
    sparse_labels = tf.sparse.SparseTensor(
        tf.expand_dims(label_indices, axis=-1),
        tf.ones_like(contexts["labels"].values, dtype=tf.bool), (num_classes,))
    labels = tf.sparse.to_dense(
        sparse_labels, default_value=False, validate_indices=False)

    # convert to batch format.
    if "id" in contexts:
      batch_video_ids = tf.expand_dims(contexts["id"], 0)
    batch_video_matrix = tf.expand_dims(video_matrix, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)
    batch_label_weights = None

  output_dict = {
      "video_matrix": batch_video_matrix,
      "labels": batch_labels,
      "num_frames": batch_frames,
  }
  if batch_video_ids is not None:
    output_dict["video_ids"] = batch_video_ids
  if batch_label_weights is not None:
    output_dict["label_weights"] = batch_label_weights

  return output_dict
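

# Shape sketch for `_process_segment_and_label` (illustrative; `F` denotes the
# concatenated feature dimension):
#   segment_labels=True:  "video_matrix" is [num_segment, segment_size, F] and
#     "labels" is a dense [num_segment, num_classes] tensor of segment scores.
#   segment_labels=False: tensors get a leading batch dimension of one, e.g.
#     "video_matrix" is [1, max_frames, F] and "labels" is [1, num_classes].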


def _get_video_matrix(features, feature_size, dtype, max_frames,
                      max_quantized_value, min_quantized_value):
  """Decodes features from an input string and quantizes it.

  Args:
    features: raw feature values.
    feature_size: length of each frame feature vector.
    dtype: raw type of the feature.
    max_frames: number of frames (rows) in the output feature_matrix.
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    feature_matrix: matrix of all frame-features
    num_frames: number of frames in the sequence
  """
  decoded_features = tf.reshape(features, [-1, feature_size])

  num_frames = tf.math.minimum(tf.shape(decoded_features)[0], max_frames)
  if dtype.is_integer:
    feature_matrix = utils.Dequantize(decoded_features, max_quantized_value,
                                      min_quantized_value)
  else:
    feature_matrix = decoded_features
  feature_matrix = resize_axis(feature_matrix, 0, max_frames)
  return feature_matrix, num_frames
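

# Dequantization sketch (hypothetical values): a byte-quantized value q in
# [0, 255] is mapped back onto roughly [min_quantized_value,
# max_quantized_value], so with the defaults of (-2, 2):
#   q = 0    -> ~ -2.0
#   q = 128  -> ~  0.0
#   q = 255  -> ~  2.0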


def _concat_features(features, feature_names, feature_sizes, feature_dtypes,
                     max_frames, max_quantized_value, min_quantized_value):
  """Loads (potentially) different types of features and concatenates them.

  Args:
      features: raw feature values
      feature_names: list of feature names
      feature_sizes: list of features sizes
      feature_dtypes: list of feature dtypes.
      max_frames: maximum number of frames in the sequence.
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

  Returns:
      video_matrix: different features concatenated into one matrix
      num_frames: the number of frames in the video
  """

  num_features = len(feature_names)
  assert num_features > 0, "No feature selected: feature_names is empty!"

  assert len(feature_names) == len(feature_sizes), (
      "length of feature_names (={}) != length of feature_sizes (={})".format(
          len(feature_names), len(feature_sizes)))
  assert len(feature_names) == len(feature_dtypes), (
      "length of feature_names (={}) != length of feature_dtypes (={})".format(
          len(feature_names), len(feature_dtypes)))

  num_frames = -1  # the number of frames in the video
  feature_matrices = [None] * num_features  # an array of different features
  for i in range(num_features):
    feature_matrix, num_frames_in_this_feature = _get_video_matrix(
        features[feature_names[i]],
        feature_sizes[i],
        tf.dtypes.as_dtype(feature_dtypes[i]),
        max_frames,
        max_quantized_value,
        min_quantized_value)
    if num_frames == -1:
      num_frames = num_frames_in_this_feature

    feature_matrices[i] = feature_matrix

  # Cap the number of frames at max_frames.
  num_frames = tf.minimum(num_frames, max_frames)

  # concatenate different features
  video_matrix = tf.concat(feature_matrices, 1)

  return video_matrix, num_frames
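

# Example (assuming the standard YT8M frame-level features): with
# feature_names=["rgb", "audio"] and feature_sizes=[1024, 128], each frame row
# of `video_matrix` is the 1024-d RGB vector concatenated with the 128-d audio
# vector, giving shape [max_frames, 1152].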


class Decoder(decoder.Decoder):
  """A tf.Example decoder for classification task."""

  def __init__(
      self,
      input_params: exp_cfg.DataConfig,
  ):

    self._segment_labels = input_params.segment_labels
    self._feature_names = input_params.feature_names
    self._feature_sources = input_params.feature_sources
    self._feature_sizes = input_params.feature_sizes
    self._feature_dtypes = input_params.feature_dtypes
    self._feature_from_bytes = input_params.feature_from_bytes
    self._include_video_id = input_params.include_video_id
    self._label_field = input_params.label_field

    assert len(self._feature_names) == len(self._feature_sources), (
        "length of feature_names (={}) != length of feature_sources (={})"
        .format(len(self._feature_names), len(self._feature_sources)))

    self._context_features = {}
    self._sequence_features = {}
    if self._include_video_id:
      self._context_features["id"] = tf.io.FixedLenFeature([], tf.string)

    if self._segment_labels:
      self._context_features.update({
          # There is no need to read the end time, since we assume every
          # segment has the same size.
          "segment_labels": tf.io.VarLenFeature(tf.int64),
          "segment_start_times": tf.io.VarLenFeature(tf.int64),
          "segment_scores": tf.io.VarLenFeature(tf.float32)
      })
    else:
      self._context_features.update(
          {self._label_field: tf.io.VarLenFeature(tf.int64)})

    for i, name in enumerate(self._feature_names):
      if self._feature_from_bytes[i]:
        feature_type = tf.io.FixedLenSequenceFeature([], dtype=tf.string)
      else:
        dtype = tf.dtypes.as_dtype(self._feature_dtypes[i])
        feature_shape = [self._feature_sizes[i]]
        if self._feature_sources[i] == "feature":
          feature_type = tf.io.FixedLenSequenceFeature(feature_shape, dtype)
        else:
          feature_type = tf.io.FixedLenFeature(feature_shape, dtype)
      if self._feature_sources[i] == "feature":
        self._sequence_features[name] = feature_type
      elif self._feature_sources[i] == "context":
        self._context_features[name] = feature_type
      else:
        raise ValueError(
            f"Unknown feature source {self._feature_sources[i]} for {name}")

  def decode(self, serialized_example):
    """Parses a single tf.Example into image and label tensors."""

    contexts, features = tf.io.parse_single_sequence_example(
        serialized_example,
        context_features=self._context_features,
        sequence_features=self._sequence_features)

    decoded_tensor = {**contexts, **features}
    for i, name in enumerate(self._feature_names):
      if self._feature_from_bytes[i]:
        # Decode raw bytes into a float32 feature tensor.
        dtype = tf.dtypes.as_dtype(self._feature_dtypes[i])
        decoded_tensor[name] = tf.cast(
            tf.io.decode_raw(decoded_tensor[name], dtype), tf.float32)
      else:
        # Convert any VarLen feature to a dense tensor.
        if isinstance(decoded_tensor[name], tf.SparseTensor):
          decoded_tensor[name] = tf.sparse.to_dense(decoded_tensor[name])
    if not self._segment_labels:
      decoded_tensor["labels"] = decoded_tensor[self._label_field]
    return decoded_tensor
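

# Usage sketch (illustrative; `params` is a hypothetical exp_cfg.DataConfig
# and `file_pattern` a hypothetical TFRecord glob):
#
#   decoder_obj = Decoder(params)
#   dataset = tf.data.TFRecordDataset(tf.io.gfile.glob(file_pattern))
#   dataset = dataset.map(decoder_obj.decode)  # dicts of decoded tensors.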


class Parser(parser.Parser):
  """Parses a video and label dataset.

    Takes the decoded raw tensors dict and parses them into a dictionary of
    tensors that can be consumed by the model. It is executed after the
    decoder.
  """

  def __init__(
      self,
      input_params: exp_cfg.DataConfig,
      max_quantized_value=2,
      min_quantized_value=-2,
  ):
    self._num_classes = input_params.num_classes
    self._segment_size = input_params.segment_size
    self._segment_labels = input_params.segment_labels
    self._include_video_id = input_params.include_video_id
    self._feature_names = input_params.feature_names
    self._feature_sizes = input_params.feature_sizes
    self._feature_dtypes = input_params.feature_dtypes
    self._max_frames = input_params.max_frames
    self._max_quantized_value = max_quantized_value
    self._min_quantized_value = min_quantized_value

  def _parse_train_data(self, decoded_tensors):
    """Parses data for training."""
    # loads (potentially) different types of features and concatenates them
    self.video_matrix, self.num_frames = _concat_features(
        decoded_tensors, self._feature_names, self._feature_sizes,
        self._feature_dtypes, self._max_frames, self._max_quantized_value,
        self._min_quantized_value)
    if not self._include_video_id and "id" in decoded_tensors:
      del decoded_tensors["id"]
    output_dict = _process_segment_and_label(self.video_matrix, self.num_frames,
                                             decoded_tensors,
                                             self._segment_labels,
                                             self._segment_size,
                                             self._num_classes)
    return output_dict

  def _parse_eval_data(self, decoded_tensors):
    """Parses data for evaluation."""
    # loads (potentially) different types of features and concatenates them
    self.video_matrix, self.num_frames = _concat_features(
        decoded_tensors, self._feature_names, self._feature_sizes,
        self._feature_dtypes, self._max_frames, self._max_quantized_value,
        self._min_quantized_value)
    if not self._include_video_id and "id" in decoded_tensors:
      del decoded_tensors["id"]
    output_dict = _process_segment_and_label(self.video_matrix, self.num_frames,
                                             decoded_tensors,
                                             self._segment_labels,
                                             self._segment_size,
                                             self._num_classes)
    return output_dict  # batched

  def parse_fn(self, is_training):
    """Returns a parse fn that reads and parses raw tensors from the decoder.

    Args:
      is_training: a `bool` to indicate whether it is in training mode.

    Returns:
      parse: a `callable` that takes the decoded raw tensors dict and returns
        the parsed output dictionary of tensors.
    """

    def parse(decoded_tensors):
      """Parses the serialized example data."""
      if is_training:
        return self._parse_train_data(decoded_tensors)
      else:
        return self._parse_eval_data(decoded_tensors)

    return parse
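

# Usage sketch (illustrative): the parse fn is mapped over the dataset after
# the decoder, e.g.
#
#   parser_obj = Parser(params)  # `params` is a hypothetical DataConfig.
#   dataset = dataset.map(parser_obj.parse_fn(is_training=True))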


class PostBatchProcessor:
  """Processes a video and label dataset which is batched."""

  def __init__(self, input_params: exp_cfg.DataConfig):
    self.segment_labels = input_params.segment_labels
    self.num_classes = input_params.num_classes
    self.segment_size = input_params.segment_size

  def post_fn(self, batched_tensors):
    """Processes batched Tensors."""
    video_ids = batched_tensors.get("video_ids", None)
    video_matrix = batched_tensors["video_matrix"]
    labels = batched_tensors["labels"]
    num_frames = batched_tensors["num_frames"]
    label_weights = None

    if self.segment_labels:
      # [batch x num_segment x segment_size x num_features]
      # -> [batch * num_segment x segment_size x num_features]
      if video_ids is not None:
        video_ids = tf.reshape(video_ids, [-1])
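      # 1152 assumes the standard YT8M frame features: a 1024-d RGB vector
      # concatenated with a 128-d audio vector per frame.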
      video_matrix = tf.reshape(video_matrix, [-1, self.segment_size, 1152])
      labels = tf.reshape(labels, [-1, self.num_classes])
      num_frames = tf.reshape(num_frames, [-1, 1])

      label_weights = tf.reshape(batched_tensors["label_weights"],
                                 [-1, self.num_classes])

    else:
      # NOTE(b/237445211): Must provide axis argument to tf.squeeze.
      video_matrix = tf.squeeze(video_matrix, axis=1)
      labels = tf.squeeze(labels, axis=1)

    batched_tensors = {
        "video_matrix": video_matrix,
        "labels": labels,
        "num_frames": num_frames,
    }
    if video_ids is not None:
      batched_tensors["video_ids"] = video_ids

    if label_weights is not None:
      batched_tensors["label_weights"] = label_weights

    return batched_tensors
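

# Shape sketch for the segment branch of `post_fn` (illustrative): an input
# "video_matrix" of shape [batch, num_segment, segment_size, 1152] is
# flattened to [batch * num_segment, segment_size, 1152], so each segment
# becomes an independent example downstream.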


class TransformBatcher:
  """Performs manual batching on input dataset."""

  def __init__(self, input_params: exp_cfg.DataConfig):
    self._segment_labels = input_params.segment_labels
    self._global_batch_size = input_params.global_batch_size
    self._is_training = input_params.is_training
    self._include_video_id = input_params.include_video_id
    self._drop_remainder = input_params.drop_remainder

  def batch_fn(self, dataset, input_context):
    """Add padding when segment_labels is true."""
    per_replica_batch_size = input_context.get_per_replica_batch_size(
        self._global_batch_size) if input_context else self._global_batch_size
    if not self._segment_labels:
      dataset = dataset.batch(
          per_replica_batch_size, drop_remainder=self._drop_remainder)
    else:
      # add padding
      pad_shapes = {
          "video_matrix": [None, None, None],
          "labels": [None, None],
          "num_frames": [None, None],
          "label_weights": [None, None]
      }
      pad_values = {
          "video_matrix": 0.0,
          "labels": -1.0,
          "num_frames": 0.0,
          "label_weights": 0.0
      }
      if self._include_video_id:
        pad_shapes["video_ids"] = [None]
        pad_values["video_ids"] = None
      dataset = dataset.padded_batch(
          per_replica_batch_size,
          padded_shapes=pad_shapes,
          drop_remainder=self._drop_remainder,
          padding_values=pad_values)
    return dataset
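

# End-to-end sketch of how these pieces compose (illustrative; `params` is a
# hypothetical exp_cfg.DataConfig and `files` a hypothetical list of TFRecord
# paths):
#
#   dataset = tf.data.TFRecordDataset(files)
#   dataset = dataset.map(Decoder(params).decode)
#   dataset = dataset.map(Parser(params).parse_fn(params.is_training))
#   dataset = TransformBatcher(params).batch_fn(dataset, input_context=None)
#   dataset = dataset.map(PostBatchProcessor(params).post_fn)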