"model.properties" did not exist on "21ff889906d5b33d7c05b07cde34c74f8cc93f68"
_video_opt.py 20.2 KB
Newer Older
Francisco Massa's avatar
Francisco Massa committed
1
import math
2
3
import warnings
from fractions import Fraction
4
from typing import List, Tuple, Dict, Optional, Union
5

6
import torch
7

8
from ..extension import _load_library
9
10
11


# Optional native decoder backends: probe each one and record availability in a
# module-level flag instead of failing at import time.
_HAS_VIDEO_OPT = False
try:
    _load_library("video_reader")
    _HAS_VIDEO_OPT = True
except (ImportError, OSError):
    pass

_HAS_VIDEO_DECODER = False
try:
    _load_library("Decoder")
    _HAS_VIDEO_DECODER = True
except (ImportError, OSError):
    pass

# 0/1 is the "unset" timebase sentinel; note it is falsy, which the readers rely on.
default_timebase = Fraction(0, 1)


26
27
# simple class for torch scripting
# the complex Fraction class from fractions module is not scriptable
class Timebase:
    """A scriptable rational number (numerator / denominator) standing in for Fraction."""

    __annotations__ = {"numerator": int, "denominator": int}
    __slots__ = ["numerator", "denominator"]

    def __init__(self, numerator: int, denominator: int) -> None:
        self.numerator = numerator
        self.denominator = denominator


41
class VideoMetaData:
    """Scriptable container for the stream metadata of one video.

    Duration fields are expressed in seconds; ``has_video`` / ``has_audio``
    record whether the corresponding stream was found in the container.
    """

    __annotations__ = {
        "has_video": bool,
        "video_timebase": Timebase,
        "video_duration": float,
        "video_fps": float,
        "has_audio": bool,
        "audio_timebase": Timebase,
        "audio_duration": float,
        "audio_sample_rate": float,
    }
    __slots__ = [
        "has_video",
        "video_timebase",
        "video_duration",
        "video_fps",
        "has_audio",
        "audio_timebase",
        "audio_duration",
        "audio_sample_rate",
    ]

    def __init__(self) -> None:
        # Video-stream defaults (filled in by _fill_info when a stream exists).
        self.has_video = False
        self.video_timebase = Timebase(0, 1)
        self.video_duration = 0.0
        self.video_fps = 0.0
        # Audio-stream defaults.
        self.has_audio = False
        self.audio_timebase = Timebase(0, 1)
        self.audio_duration = 0.0
        self.audio_sample_rate = 0.0


74
def _validate_pts(pts_range: Tuple[int, int]) -> None:
75

76
    if pts_range[1] > 0:
77
78
79
        assert (
            pts_range[0] <= pts_range[1]
        ), """Start pts should not be smaller than end pts, got
80
            start pts: {:d} and end pts: {:d}""".format(
81
82
83
            pts_range[0],
            pts_range[1],
        )
84
85


86
87
88
89
90
91
92
93
def _fill_info(
    vtimebase: torch.Tensor,
    vfps: torch.Tensor,
    vduration: torch.Tensor,
    atimebase: torch.Tensor,
    asample_rate: torch.Tensor,
    aduration: torch.Tensor,
) -> VideoMetaData:
    """Assemble a VideoMetaData from the raw tensors returned by the reader op.

    An empty tensor means the corresponding field/stream is absent.  Durations
    arrive in stream-timebase units and are converted to seconds here.
    """
    meta = VideoMetaData()

    if vtimebase.numel() > 0:
        meta.video_timebase = Timebase(int(vtimebase[0].item()), int(vtimebase[1].item()))
        v_scale = vtimebase[0].item() / float(vtimebase[1].item())
        if vduration.numel() > 0:
            meta.has_video = True
            # timebase units -> seconds
            meta.video_duration = float(vduration.item()) * v_scale
    if vfps.numel() > 0:
        meta.video_fps = float(vfps.item())

    if atimebase.numel() > 0:
        meta.audio_timebase = Timebase(int(atimebase[0].item()), int(atimebase[1].item()))
        a_scale = atimebase[0].item() / float(atimebase[1].item())
        if aduration.numel() > 0:
            meta.has_audio = True
            meta.audio_duration = float(aduration.item()) * a_scale
    if asample_rate.numel() > 0:
        meta.audio_sample_rate = float(asample_rate.item())

    return meta
116
117


118
119
120
def _align_audio_frames(
    aframes: torch.Tensor, aframe_pts: torch.Tensor, audio_pts_range: Tuple[int, int]
) -> torch.Tensor:
121
122
123
124
125
126
127
    start, end = aframe_pts[0], aframe_pts[-1]
    num_samples = aframes.size(0)
    step_per_aframe = float(end - start + 1) / float(num_samples)
    s_idx = 0
    e_idx = num_samples
    if start < audio_pts_range[0]:
        s_idx = int((audio_pts_range[0] - start) / step_per_aframe)
128
    if audio_pts_range[1] != -1 and end > audio_pts_range[1]:
129
130
131
132
133
        e_idx = int((audio_pts_range[1] - end) / step_per_aframe)
    return aframes[s_idx:e_idx, :]


def _read_video_from_file(
    filename: str,
    seek_frame_margin: float = 0.25,
    read_video_stream: bool = True,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase: Fraction = default_timebase,
    read_audio_stream: bool = True,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase: Fraction = default_timebase,
) -> Tuple[torch.Tensor, torch.Tensor, VideoMetaData]:
    """Decode video and audio frames of a file in one pass.

    Args:
        filename (str): path to the video file
        seek_frame_margin (double, optional): seeking in the stream is imprecise, so when
            a start pts is given we seek this many seconds earlier
        read_video_stream (int, optional): whether to decode the video stream (1) or not (0)
        video_width/video_height/video_min_dimension/video_max_dimension (int): together
            decide the size of decoded frames:

                - all four 0: keep the original resolution
                - only video_min_dimension != 0: keep aspect ratio, shorter edge becomes
                  video_min_dimension
                - only video_max_dimension != 0: keep aspect ratio, longer edge becomes
                  video_max_dimension
                - video_min_dimension != 0 and video_max_dimension != 0: shorter edge
                  becomes video_min_dimension and longer edge video_max_dimension; the
                  aspect ratio may not be preserved
                - only video_height != 0: keep aspect ratio, resize to that height
                - only video_width != 0: keep aspect ratio, resize to that width
                - video_width != 0 and video_height != 0: resize to exactly
                  (video_width, video_height)
        video_pts_range (list(int), optional): start/end presentation timestamps of the video stream
        video_timebase (Fraction, optional): timebase of the video stream
        read_audio_stream (int, optional): whether to decode the audio stream (1) or not (0)
        audio_samples (int, optional): audio sampling rate
        audio_channels (int, optional): number of audio channels
        audio_pts_range (list(int), optional): start/end presentation timestamps of the audio stream
        audio_timebase (Fraction, optional): timebase of the audio stream

    Returns:
        vframes (Tensor[T, H, W, C]): the `T` video frames
        aframes (Tensor[L, K]): the audio frames, `L` points across `K` channels
        info (VideoMetaData): metadata of both streams
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    # Positional argument order below is fixed by the C++ op signature.
    (
        vframes,
        _vframe_pts,
        vtimebase,
        vfps,
        vduration,
        aframes,
        aframe_pts,
        atimebase,
        asample_rate,
        aduration,
    ) = torch.ops.video_reader.read_video_from_file(
        filename,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase.numerator,
        video_timebase.denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase.numerator,
        audio_timebase.denominator,
    )
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    if aframes.numel() > 0:
        # audio stream present: clip samples to the requested window
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)
    return vframes, aframes, info


230
def _read_video_timestamps_from_file(filename: str) -> Tuple[List[int], List[int], VideoMetaData]:
    """Return only the pts of every video/audio frame in the file.

    No pixel or waveform data is copied (getPtsOnly=1), so this is much faster
    than a full decode.
    """
    result = torch.ops.video_reader.read_video_from_file(
        filename,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    return vframe_pts.numpy().tolist(), aframe_pts.numpy().tolist(), info


265
def _probe_video_from_file(filename: str) -> VideoMetaData:
    """Probe a video file and return VideoMetaData describing its streams (no decoding)."""
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = torch.ops.video_reader.probe_video_from_file(
        filename
    )
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)


275
def _read_video_from_memory(
    video_data: torch.Tensor,
    seek_frame_margin: float = 0.25,
    read_video_stream: int = 1,
    video_width: int = 0,
    video_height: int = 0,
    video_min_dimension: int = 0,
    video_max_dimension: int = 0,
    video_pts_range: Tuple[int, int] = (0, -1),
    video_timebase_numerator: int = 0,
    video_timebase_denominator: int = 1,
    read_audio_stream: int = 1,
    audio_samples: int = 0,
    audio_channels: int = 0,
    audio_pts_range: Tuple[int, int] = (0, -1),
    audio_timebase_numerator: int = 0,
    audio_timebase_denominator: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Decode video and audio frames from an in-memory compressed video.

    This function is torchscriptable.

    Args:
        video_data (torch.Tensor of dtype int8, or python bytes): the compressed
            video content
        seek_frame_margin (double, optional): seeking in the stream is imprecise, so when
            a start pts is given we seek this many seconds earlier
        read_video_stream (int, optional): whether to decode the video stream (1) or not (0)
        video_width/video_height/video_min_dimension/video_max_dimension (int): together
            decide the size of decoded frames:

                - all four 0: keep the original resolution
                - only video_min_dimension != 0: keep aspect ratio, shorter edge becomes
                  video_min_dimension
                - only video_max_dimension != 0: keep aspect ratio, longer edge becomes
                  video_max_dimension
                - video_min_dimension != 0 and video_max_dimension != 0: shorter edge
                  becomes video_min_dimension and longer edge video_max_dimension; the
                  aspect ratio may not be preserved
                - only video_height != 0: keep aspect ratio, resize to that height
                - only video_width != 0: keep aspect ratio, resize to that width
                - video_width != 0 and video_height != 0: resize to exactly
                  (video_width, video_height)
        video_pts_range (list(int), optional): start/end presentation timestamps of the video stream
        video_timebase_numerator / video_timebase_denominator (float, optional): rational
            timebase of the video stream
        read_audio_stream (int, optional): whether to decode the audio stream (1) or not (0)
        audio_samples (int, optional): audio sampling rate
        audio_channels (int, optional): number of audio channels
        audio_pts_range (list(int), optional): start/end presentation timestamps of the audio stream
        audio_timebase_numerator / audio_timebase_denominator (float, optional): rational
            timebase of the audio stream

    Returns:
        vframes (Tensor[T, H, W, C]): the `T` video frames
        aframes (Tensor[L, K]): the audio frames, `L` points across `K` channels
    """
    _validate_pts(video_pts_range)
    _validate_pts(audio_pts_range)

    if not isinstance(video_data, torch.Tensor):
        # raw bytes -> uint8 tensor view of the same buffer (no copy)
        video_data = torch.frombuffer(video_data, dtype=torch.uint8)

    # Positional argument order below is fixed by the C++ op signature.
    (
        vframes,
        _vframe_pts,
        vtimebase,
        vfps,
        vduration,
        aframes,
        aframe_pts,
        atimebase,
        asample_rate,
        aduration,
    ) = torch.ops.video_reader.read_video_from_memory(
        video_data,
        seek_frame_margin,
        0,  # getPtsOnly
        read_video_stream,
        video_width,
        video_height,
        video_min_dimension,
        video_max_dimension,
        video_pts_range[0],
        video_pts_range[1],
        video_timebase_numerator,
        video_timebase_denominator,
        read_audio_stream,
        audio_samples,
        audio_channels,
        audio_pts_range[0],
        audio_pts_range[1],
        audio_timebase_numerator,
        audio_timebase_denominator,
    )

    if aframes.numel() > 0:
        # audio stream present: clip samples to the requested window
        aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range)

    return vframes, aframes
380
381


382
383
384
def _read_video_timestamps_from_memory(
    video_data: torch.Tensor,
) -> Tuple[List[int], List[int], VideoMetaData]:
    """Return only the pts of every frame of an in-memory video.

    No pixel or waveform data is copied (getPtsOnly=1), so this is much faster
    than a full decode.
    """
    if not isinstance(video_data, torch.Tensor):
        # raw bytes -> uint8 tensor view of the same buffer (no copy)
        video_data = torch.frombuffer(video_data, dtype=torch.uint8)
    result = torch.ops.video_reader.read_video_from_memory(
        video_data,
        0,  # seek_frame_margin
        1,  # getPtsOnly
        1,  # read_video_stream
        0,  # video_width
        0,  # video_height
        0,  # video_min_dimension
        0,  # video_max_dimension
        0,  # video_start_pts
        -1,  # video_end_pts
        0,  # video_timebase_num
        1,  # video_timebase_den
        1,  # read_audio_stream
        0,  # audio_samples
        0,  # audio_channels
        0,  # audio_start_pts
        -1,  # audio_end_pts
        0,  # audio_timebase_num
        1,  # audio_timebase_den
    )
    _vframes, vframe_pts, vtimebase, vfps, vduration, _aframes, aframe_pts, atimebase, asample_rate, aduration = result
    info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
    return vframe_pts.numpy().tolist(), aframe_pts.numpy().tolist(), info
419
420


421
422
423
def _probe_video_from_memory(
    video_data: torch.Tensor,
) -> VideoMetaData:
    """Probe an in-memory video and return VideoMetaData describing its streams.

    This function is torchscriptable.
    """
    if not isinstance(video_data, torch.Tensor):
        # raw bytes -> uint8 tensor view of the same buffer (no copy)
        video_data = torch.frombuffer(video_data, dtype=torch.uint8)
    vtimebase, vfps, vduration, atimebase, asample_rate, aduration = torch.ops.video_reader.probe_video_from_memory(
        video_data
    )
    return _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
Francisco Massa's avatar
Francisco Massa committed
434
435


436
437
438
def _convert_to_sec(
    start_pts: Union[float, Fraction], end_pts: Union[float, Fraction], pts_unit: str, time_base: Fraction
) -> Tuple[Union[float, Fraction], Union[float, Fraction], str]:
439
    if pts_unit == "pts":
440
441
        start_pts = float(start_pts * time_base)
        end_pts = float(end_pts * time_base)
442
        pts_unit = "sec"
443
444
445
    return start_pts, end_pts, pts_unit


446
447
448
449
450
451
def _read_video(
    filename: str,
    start_pts: Union[float, Fraction] = 0,
    end_pts: Optional[Union[float, Fraction]] = None,
    pts_unit: str = "pts",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, float]]:
    """Read the video and audio frames of `filename` inside [start_pts, end_pts].

    Returns (vframes, aframes, info) where info maps "video_fps" / "audio_fps"
    to the corresponding rate when that stream exists.
    """
    if end_pts is None:
        end_pts = float("inf")

    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    probe = _probe_video_from_file(filename)
    has_video = probe.has_video
    has_audio = probe.has_audio

    video_pts_range = (0, -1)
    audio_pts_range = (0, -1)
    video_timebase = default_timebase
    audio_timebase = default_timebase
    time_base = default_timebase

    if has_video:
        video_timebase = Fraction(probe.video_timebase.numerator, probe.video_timebase.denominator)
        time_base = video_timebase
    if has_audio:
        audio_timebase = Fraction(probe.audio_timebase.numerator, probe.audio_timebase.denominator)
        # the video timebase wins when both streams exist (default_timebase is falsy)
        time_base = time_base if time_base else audio_timebase

    start_pts_sec, end_pts_sec, pts_unit = _convert_to_sec(start_pts, end_pts, pts_unit, time_base)

    def _window_in_pts(tb: Fraction) -> Tuple[int, int]:
        # Translate the second-based window into integer pts for timebase `tb`.
        lo = start_pts_sec
        hi = end_pts_sec
        if pts_unit == "sec":
            lo = int(math.floor(start_pts_sec * (1 / tb)))
            if hi != float("inf"):
                hi = int(math.ceil(end_pts_sec * (1 / tb)))
        if hi == float("inf"):
            hi = -1  # -1 means "to the end of the stream"
        return lo, hi

    if has_video:
        video_pts_range = _window_in_pts(video_timebase)
    if has_audio:
        audio_pts_range = _window_in_pts(audio_timebase)

    vframes, aframes, meta = _read_video_from_file(
        filename,
        read_video_stream=True,
        video_pts_range=video_pts_range,
        video_timebase=video_timebase,
        read_audio_stream=True,
        audio_pts_range=audio_pts_range,
        audio_timebase=audio_timebase,
    )

    info = {}
    if has_video:
        info["video_fps"] = meta.video_fps
    if has_audio:
        info["audio_fps"] = meta.audio_sample_rate
    return vframes, aframes, info
Francisco Massa's avatar
Francisco Massa committed
515
516


517
518
519
def _read_video_timestamps(
    filename: str, pts_unit: str = "pts"
) -> Tuple[Union[List[int], List[Fraction]], Optional[float]]:
    """Return the per-frame video pts of `filename` and its fps (None if no video stream).

    With pts_unit="sec" the pts are rescaled to seconds as exact Fractions.
    """
    if pts_unit == "pts":
        warnings.warn(
            "The pts_unit 'pts' gives wrong results and will be removed in a "
            + "follow-up version. Please use pts_unit 'sec'."
        )

    pts: Union[List[int], List[Fraction]]
    pts, _, info = _read_video_timestamps_from_file(filename)

    if pts_unit == "sec":
        scale = Fraction(info.video_timebase.numerator, info.video_timebase.denominator)
        pts = [p * scale for p in pts]

    return pts, (info.video_fps if info.has_video else None)