import torch

from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)
from .image import (
    read_image,
    decode_image,
    encode_jpeg,
    write_jpeg,
    encode_png,
    write_png,
)

if _HAS_VIDEO_OPT:

    def _has_video_opt():
        """Return True: torchvision was compiled with video_reader support."""
        return True

else:

    def _has_video_opt():
        """Return False: torchvision was built without video_reader support."""
        return False
class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :mod:`VideoReader` object, seeks into the
        2s point, and returns a single frame::

            import torchvision
            video_path = "path_to_a_test_video"

            reader = torchvision.io.VideoReader(video_path, "video")
            reader.seek(2.0)
            frame = next(reader)

        :mod:`VideoReader` implements the iterable API, which makes it suitable to
        using it in conjunction with :mod:`itertools` for more advanced reading.
        As such, we can use a :mod:`VideoReader` instance inside for loops::

            reader.seek(2)
            for frame in reader:
                frames.append(frame['data'])
            # additionally, `seek` implements a fluent API, so we can do
            for frame in reader.seek(2):
                frames.append(frame['data'])

        With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
        following code::

            for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
                frames.append(frame['data'])

        and similarly, reading 10 frames after the 2s timestamp can be achieved
        as follows::

            for frame in itertools.islice(reader.seek(2), 10):
                frames.append(frame['data'])

    .. note::

        Each stream descriptor consists of two parts: stream type (e.g. 'video') and
        a unique stream id (which are determined by the video encoding).
        In this way, if the video container contains multiple
        streams of the same type, users can access the one they want.
        If only stream type is passed, the decoder auto-detects first stream of that type.

    Args:
        path (string): Path to the video file in supported format
        stream (string, optional): descriptor of the required stream, followed by the stream id,
            in the format ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``.
            Currently available options include ``['video', 'audio']``
    """

    def __init__(self, path, stream="video"):
        # The native decoder is only available when torchvision was compiled
        # with video_reader support; fail fast otherwise.
        if not _has_video_opt():
            raise RuntimeError("Not compiled with video_reader support")
        self._c = torch.classes.torchvision.Video(path, stream)

    def __next__(self):
        """Decodes and returns the next frame of the current stream

        Returns:
            (dict): a dictionary with fields ``data`` and ``pts``
            containing decoded frame and corresponding timestamp

        Raises:
            StopIteration: when the end of the current stream is reached
            (signalled by the decoder returning an empty frame tensor).
        """
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return {"data": frame, "pts": pts}

    def __iter__(self):
        # The reader is its own iterator (see __next__).
        return self

    def seek(self, time_s: float):
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds

        Returns:
            VideoReader: ``self``, enabling the fluent ``for frame in reader.seek(t)``
            idiom.

        .. note::
            Current implementation is the so-called precise seek. This
            means following seek, call to :mod:`next()` will return the
            frame with the exact timestamp if it exists or
            the first frame with timestamp larger than ``time_s``.
        """
        self._c.seek(time_s)
        return self

    def get_metadata(self):
        """Returns video metadata

        Returns:
            (dict): dictionary containing duration and frame rate for every stream
        """
        return self._c.get_metadata()

    def set_current_stream(self, stream: str):
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream. Defaults to ``"video:0"``
                Currently available stream types include ``['video', 'audio']``.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (which are determined by video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only stream type is passed, the decoder auto-detects first stream
                of that type and returns it.

        Returns:
            (bool): True on success, False otherwise
        """
        return self._c.set_current_stream(stream)
# Public API of torchvision.io. Note: the exported class is `VideoReader`
# (the stale "Video" entry pointed at a name that no longer exists in this
# module after the class was renamed).
__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "read_image",
    "decode_image",
    "encode_jpeg",
    "write_jpeg",
    "encode_png",
    "write_png",
    "VideoReader",
]