import importlib
import math
import os
import warnings
from fractions import Fraction
from typing import List, Tuple

import numpy as np
import torch


_HAS_VIDEO_OPT = False

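# Try to load the native "video_reader" extension built next to the torchvision
# package, if it exists; _HAS_VIDEO_OPT records whether the
# torch.ops.video_reader ops are available.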
try:
    lib_dir = os.path.join(os.path.dirname(__file__), "..")

    loader_details = (
        importlib.machinery.ExtensionFileLoader,
        importlib.machinery.EXTENSION_SUFFIXES
    )

    extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_specs = extfinder.find_spec("video_reader")
    if ext_specs is not None:
        torch.ops.load_library(ext_specs.origin)
        _HAS_VIDEO_OPT = True
except (ImportError, OSError):
    pass


default_timebase = Fraction(0, 1)


# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
class Timebase(object):
    __annotations__ = {"numerator": int, "denominator": int}
    __slots__ = ["numerator", "denominator"]

    def __init__(
        self,
        numerator,  # type: int
        denominator,  # type: int
    ):
        # type: (...) -> None
        self.numerator = numerator
        self.denominator = denominator
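    # For example, a stream with a 90 kHz clock would be described as
    # Timebase(1, 90000): a pts value of 90000 then corresponds to one second.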


class VideoMetaData(object):
    __annotations__ = {
        "has_video": bool,
        "video_timebase": Timebase,
        "video_duration": float,
        "video_fps": float,
        "has_audio": bool,
        "audio_timebase": Timebase,
        "audio_duration": float,
        "audio_sample_rate": float,
    }
    __slots__ = [
        "has_video",
        "video_timebase",
        "video_duration",
        "video_fps",
        "has_audio",
        "audio_timebase",
        "audio_duration",
        "audio_sample_rate",
    ]

    def __init__(self):
        self.has_video = False
        self.video_timebase = Timebase(0, 1)
        self.video_duration = 0.0
        self.video_fps = 0.0
        self.has_audio = False
        self.audio_timebase = Timebase(0, 1)
        self.audio_duration = 0.0
        self.audio_sample_rate = 0.0



def _validate_pts(pts_range):
    # type: (List[int]) -> None
    if pts_range[1] > 0:
        assert (
            pts_range[0] <= pts_range[1]
        ), """Start pts should not be larger than end pts, got
            start pts: {0:d} and end pts: {1:d}""".format(
            pts_range[0],
            pts_range[1],
        )


def _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration):
    # type: (torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor) -> VideoMetaData
    """
    Build a VideoMetaData struct with info about the video
    """
    meta = VideoMetaData()
    if vtimebase.numel() > 0:
        meta.video_timebase = Timebase(
            int(vtimebase[0].item()), int(vtimebase[1].item())
        )
        timebase = vtimebase[0].item() / float(vtimebase[1].item())
        if vduration.numel() > 0:
            meta.has_video = True
            meta.video_duration = float(vduration.item()) * timebase
    if vfps.numel() > 0:
        meta.video_fps = float(vfps.item())
    if atimebase.numel() > 0:
        meta.audio_timebase = Timebase(
            int(atimebase[0].item()), int(atimebase[1].item())
        )
        timebase = atimebase[0].item() / float(atimebase[1].item())
        if aduration.numel() > 0:
            meta.has_audio = True
            meta.audio_duration = float(aduration.item()) * timebase
    if asample_rate.numel() > 0:
        meta.audio_sample_rate = float(asample_rate.item())

    return meta


def _align_audio_frames(aframes, aframe_pts, audio_pts_range):
    # type: (torch.Tensor, torch.Tensor, List[int]) -> torch.Tensor
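    # The decoder may return more audio samples than requested because seeking
    # is only frame-accurate. Map the requested pts window onto sample indices
    # and trim the waveform; note that e_idx becomes a negative index (trimming
    # from the end) when the decoded frames run past audio_pts_range[1].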
    start, end = aframe_pts[0], aframe_pts[-1]
    num_samples = aframes.size(0)
    step_per_aframe = float(end - start + 1) / float(num_samples)
    s_idx = 0
    e_idx = num_samples
    if start < audio_pts_range[0]:
        s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
    if end > audio_pts_range[1]:
        e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
    return aframes[s_idx:e_idx, :]


def _read_video_from_file(
    filename,
    seek_frame_margin=0.25,
    read_video_stream=True,
    video_width=0,
    video_height=0,
    video_min_dimension=0,
    video_max_dimension=0,
    video_pts_range=(0, -1),
    video_timebase=default_timebase,
    read_audio_stream=True,
    audio_samples=0,
    audio_channels=0,
    audio_pts_range=(0, -1),
    audio_timebase=default_timebase,
):
    """
    Reads a video from a file, returning both the video frames as well as
    the audio frames

    Args
    ----------
    filename : str
        path to the video file
    seek_frame_margin: double, optional
166
167
        seeking frame in the stream is imprecise. Thus, when video_start_pts
        is specified, we seek the pts earlier by seek_frame_margin seconds
168
169
    read_video_stream: int, optional
        whether read video stream. If yes, set to 1. Otherwise, 0
170
    video_width/video_height/video_min_dimension/video_max_dimension: int
171
        together decide the size of decoded frames
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the orignal frame resolution
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension = 0, keep the aspect ratio and resize the
            frame so that shorter edge size is video_min_dimension
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension != 0, keep the aspect ratio and resize
            the frame so that longer edge size is video_max_dimension
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension != 0, resize the frame so that shorter
            edge size is video_min_dimension, and longer edge size is
            video_max_dimension. The aspect ratio may not be preserved
        - When video_width = 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_height is $video_height
        - When video_width != 0, video_height == 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_width is $video_width
        - When video_width != 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, resize the frame so that frame
            video_width and  video_height are set to $video_width and
            $video_height, respectively
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
    video_pts_range : list(int), optional
        the start and end presentation timestamp of video stream
    video_timebase: Fraction, optional
        a Fraction rational number which denotes timebase in video stream
    read_audio_stream: int, optional
        whether read audio stream. If yes, set to 1. Otherwise, 0
    audio_samples: int, optional
        audio sampling rate
    audio_channels: int optional
        audio channels
    audio_pts_range : list(int), optional
        the start and end presentation timestamp of audio stream
    audio_timebase: Fraction, optional
        a Fraction rational number which denotes time base in audio stream

    Returns
    -------
    vframes : Tensor[T, H, W, C]
        the `T` video frames
    aframes : Tensor[L, K]
        the audio frames, where `L` is the number of points and
            `K` is the number of audio_channels
    info : Dict
        metadata for the video and audio. Can contain the fields video_fps (float)
        and audio_fps (int)
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    result = torch.ops.video_reader.read_video_from_file(
        filename,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase.numerator,
        video_timebase.denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase.numerator,
        audio_timebase.denominator,
    )

    vframes, _vframe_pts, vtimebase, vfps, vduration, \
        aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    if aframes.numel() > 0:
        # when audio stream is found
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes, info
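
# A minimal usage sketch (hypothetical file name; assumes the native
# video_reader extension loaded above, i.e. _HAS_VIDEO_OPT is True):
#
#   meta = _probe_video_from_file("clip.mp4")
#   tb = Fraction(meta.video_timebase.numerator, meta.video_timebase.denominator)
#   # decode roughly the first two seconds of video and skip the audio stream
#   vframes, _aframes, info = _read_video_from_file(
#       "clip.mp4",
#       read_video_stream=True,
#       video_pts_range=(0, int(2 / tb)),
#       video_timebase=tb,
#       read_audio_stream=False,
#   )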


def _read_video_timestamps_from_file(filename):
    """
    Decode all video and audio frames in the video. Only the pts
    (presentation timestamps) are returned. The actual frame pixel data is
    not copied. Thus, it is much faster than read_video(...)
    """
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, \
        _aframes, aframe_pts, atimebase, asample_rate, aduration = (result)
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)

    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info
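
# For example (hypothetical file name), the pts lists can be used to count
# frames or pick seek points without decoding any pixel data:
#
#   vframe_pts, aframe_pts, meta = _read_video_timestamps_from_file("clip.mp4")
#   num_video_frames = len(vframe_pts)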


def _probe_video_from_file(filename):
    """
293
    Probe a video file and return VideoMetaData with info about the video
294
295
296
297
298
299
300
    """
    result = torch.ops.video_reader.probe_video_from_file(filename)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    return info


def _read_video_from_memory(
    video_data,  # type: torch.Tensor
    seek_frame_margin=0.25,  # type: float
    read_video_stream=1,  # type: int
    video_width=0,  # type: int
    video_height=0,  # type: int
    video_min_dimension=0,  # type: int
    video_max_dimension=0,  # type: int
    video_pts_range=(0, -1),  # type: List[int]
    video_timebase_numerator=0,  # type: int
    video_timebase_denominator=1,  # type: int
    read_audio_stream=1,  # type: int
    audio_samples=0,  # type: int
    audio_channels=0,  # type: int
    audio_pts_range=(0, -1),  # type: List[int]
    audio_timebase_numerator=0,  # type: int
    audio_timebase_denominator=1,  # type: int
):
    # type: (...) -> Tuple[torch.Tensor, torch.Tensor]
    """
    Reads a video from memory, returning both the video frames as well as
    the audio frames
323
    This function is torchscriptable.
324
325
326

    Args
    ----------
327
328
    video_data : data type could be 1) torch.Tensor, dtype=torch.int8 or 2) python bytes
        compressed video content stored in either 1) torch.Tensor 2) python bytes
329
330
331
332
333
    seek_frame_margin: double, optional
        seeking frame in the stream is imprecise. Thus, when video_start_pts is specified,
        we seek the pts earlier by seek_frame_margin seconds
    read_video_stream: int, optional
        whether read video stream. If yes, set to 1. Otherwise, 0
334
    video_width/video_height/video_min_dimension/video_max_dimension: int
335
        together decide the size of decoded frames
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the orignal frame resolution
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension = 0, keep the aspect ratio and resize the
            frame so that shorter edge size is video_min_dimension
        - When video_width = 0, video_height = 0, video_min_dimension = 0,
            and video_max_dimension != 0, keep the aspect ratio and resize
            the frame so that longer edge size is video_max_dimension
        - When video_width = 0, video_height = 0, video_min_dimension != 0,
            and video_max_dimension != 0, resize the frame so that shorter
            edge size is video_min_dimension, and longer edge size is
            video_max_dimension. The aspect ratio may not be preserved
        - When video_width = 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_height is $video_height
        - When video_width != 0, video_height == 0, video_min_dimension = 0,
            and video_max_dimension = 0, keep the aspect ratio and resize
            the frame so that frame video_width is $video_width
        - When video_width != 0, video_height != 0, video_min_dimension = 0,
            and video_max_dimension = 0, resize the frame so that frame
            video_width and  video_height are set to $video_width and
            $video_height, respectively
358
359
    video_pts_range : list(int), optional
        the start and end presentation timestamp of video stream
360
361
    video_timebase_numerator / video_timebase_denominator: optional
        a rational number which denotes timebase in video stream
362
363
364
365
366
367
368
369
    read_audio_stream: int, optional
        whether read audio stream. If yes, set to 1. Otherwise, 0
    audio_samples: int, optional
        audio sampling rate
    audio_channels: int optional
        audio audio_channels
    audio_pts_range : list(int), optional
        the start and end presentation timestamp of audio stream
370
371
    audio_timebase_numerator / audio_timebase_denominator: optional
        a rational number which denotes time base in audio stream
372
373
374
375
376
377
378
379
380
381
382
383
384
385

    Returns
    -------
    vframes : Tensor[T, H, W, C]
        the `T` video frames
    aframes : Tensor[L, K]
        the audio frames, where `L` is the number of points and
            `K` is the number of channels
    """

    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase_numerator,
        video_timebase_denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase_numerator,
        audio_timebase_denominator,
    )

    vframes, _vframe_pts, vtimebase, vfps, vduration, \
        aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )

    if aframes.numel() > 0:
        # when audio stream is found
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)

    return vframes, aframes
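
# A minimal sketch (hypothetical file name); the raw bytes are wrapped in a
# uint8 tensor the same way _read_video_timestamps_from_memory does below:
#
#   with open("clip.mp4", "rb") as f:
#       video_data = torch.from_numpy(np.frombuffer(f.read(), dtype=np.uint8))
#   vframes, aframes = _read_video_from_memory(video_data)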


def _read_video_timestamps_from_memory(video_data):
    """
    Decode all frames in the video. Only the pts (presentation timestamps) are
    returned. The actual frame pixel data is not copied. Thus,
    read_video_timestamps(...) is much faster than read_video(...)
    """
    if not isinstance(video_data, torch.Tensor):
        video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8))
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, \
        _aframes, aframe_pts, atimebase, asample_rate, aduration = (
            result
        )
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)

    vframe_pts = vframe_pts.numpy().tolist()
    aframe_pts = aframe_pts.numpy().tolist()
    return vframe_pts, aframe_pts, info


def _probe_video_from_memory(video_data):
    # type: (torch.Tensor) -> VideoMetaData
    """
    Probe a video in memory and return VideoMetaData with info about the video
    This function is torchscriptable
    """
    if not isinstance(video_data, torch.Tensor):
        video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8))
    result = torch.ops.video_reader.probe_video_from_memory(video_data)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    return info
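
# For example (hypothetical bytes object `raw`, e.g. read from a file or socket):
#
#   meta = _probe_video_from_memory(raw)  # bytes are wrapped in a uint8 tensor internally
#   if meta.has_video:
#       print(meta.video_fps, meta.video_duration)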


def _read_video(filename, start_pts=0, end_pts=None, pts_unit="pts"):
    if end_pts is None:
        end_pts = float("inf")

    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    info = _probe_video_from_file(filename)

    has_video = info.has_video
    has_audio = info.has_audio

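    # Convert start/end offsets from seconds into pts in the given stream
    # timebase; the start is floored and the end is ceiled so the requested
    # interval is fully covered. An open end (inf) maps to -1, meaning
    # "until the end of the stream".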
    def get_pts(time_base):
        start_offset = start_pts
        end_offset = end_pts
        if pts_unit == "sec":
            start_offset = int(math.floor(start_pts * (1 / time_base)))
            if end_offset != float("inf"):
                end_offset = int(math.ceil(end_pts * (1 / time_base)))
        if end_offset == float("inf"):
            end_offset = -1
        return start_offset, end_offset

    video_pts_range = (0, -1)
    video_timebase = default_timebase
    if has_video:
        video_timebase = Fraction(
            info.video_timebase.numerator, info.video_timebase.denominator
        )
        video_pts_range = get_pts(video_timebase)

    audio_pts_range = (0, -1)
    audio_timebase = default_timebase
    if has_audio:
        audio_timebase = Fraction(
            info.audio_timebase.numerator, info.audio_timebase.denominator
        )
        audio_pts_range = get_pts(audio_timebase)

    vframes, aframes, info = _read_video_from_file(
        filename,
        read_video_stream=True,
        video_pts_range=video_pts_range,
        video_timebase=video_timebase,
        read_audio_stream=True,
        audio_pts_range=audio_pts_range,
        audio_timebase=audio_timebase,
    )
    _info = {}
    if has_video:
        _info["video_fps"] = info.video_fps
    if has_audio:
        _info["audio_fps"] = info.audio_sample_rate

    return vframes, aframes, _info
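
# For example (hypothetical file name), reading the first 1.5 seconds of a clip:
#
#   vframes, aframes, info = _read_video("clip.mp4", 0, 1.5, pts_unit="sec")
#   # vframes: Tensor[T, H, W, C]; info may contain "video_fps" and "audio_fps"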


def _read_video_timestamps(filename, pts_unit="pts"):
    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    pts, _, info = _read_video_timestamps_from_file(filename)

    if pts_unit == "sec":
        video_time_base = Fraction(
            info.video_timebase.numerator, info.video_timebase.denominator
        )
        pts = [x * video_time_base for x in pts]

    video_fps = info.video_fps if info.has_video else None

    return pts, video_fps
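
# For example (hypothetical file name), per-frame timestamps in seconds:
#
#   pts, fps = _read_video_timestamps("clip.mp4", pts_unit="sec")
#   # pts is a list of Fraction values; fps is None when there is no video stream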