import torch

from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)
from .image import (
    ImageReadMode,
    decode_image,
    decode_jpeg,
    decode_png,
    encode_jpeg,
    encode_png,
    read_file,
    read_image,
    write_file,
    write_jpeg,
    write_png,
)


if _HAS_VIDEO_OPT:

    def _has_video_opt():
        return True


else:

    def _has_video_opt():
        return False


class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :mod:`VideoReader` object, seeks into the
        2 second point, and returns a single frame::

            import torchvision
            video_path = "path_to_a_test_video"
            reader = torchvision.io.VideoReader(video_path, "video")
            reader.seek(2.0)
            frame = next(reader)

        :mod:`VideoReader` implements the iterable API, which makes it suitable for
        use in conjunction with :mod:`itertools` for more advanced reading.
        As such, we can use a :mod:`VideoReader` instance inside for loops::

            frames = []
            reader.seek(2)
            for frame in reader:
                frames.append(frame['data'])
            # additionally, `seek` implements a fluent API, so we can do
            for frame in reader.seek(2):
                frames.append(frame['data'])

        With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
        following code::

            for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
                frames.append(frame['data'])

        and similarly, reading 10 frames after the 2s timestamp can be achieved
        as follows::

            for frame in itertools.islice(reader.seek(2), 10):
                frames.append(frame['data'])

    .. note::

        Each stream descriptor consists of two parts: stream type (e.g. 'video') and
        a unique stream id (which is determined by the video encoding).
        In this way, if the video container contains multiple
        streams of the same type, users can access the one they want.
        If only the stream type is passed, the decoder auto-detects the first stream of
        that type.
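
        For example, assuming the container holds more than one audio stream,
        the descriptor ``"audio:1"`` would select the second one (this stream
        layout is only assumed for illustration)::

            reader = torchvision.io.VideoReader(video_path, "audio:1")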

    Args:

        path (string): Path to the video file in supported format

        stream (string, optional): descriptor of the required stream, consisting of the
            stream type followed by the stream id, in the format
            ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``.
            Currently available stream types include ``['video', 'audio']``.
    """

    def __init__(self, path, stream="video"):
        if not _has_video_opt():
            raise RuntimeError(
                "Not compiled with video_reader support, "
                + "to enable video_reader support, please install "
                + "ffmpeg (version 4.2 is currently supported) and"
                + "build torchvision from source."
            )
        self._c = torch.classes.torchvision.Video(path, stream)

    def __next__(self):
        """Decodes and returns the next frame of the current stream.
        Frames are encoded as a dict with mandatory
        data and pts fields, where data is a tensor, and pts is a
        presentation timestamp of the frame expressed in seconds
        as a float.

        Returns:
            (dict): a dictionary containing the decoded frame (``data``)
            and the corresponding timestamp (``pts``) in seconds
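
        Example:
            An illustrative use of the returned dictionary (the exact tensor
            layout depends on the stream type)::

                frame = next(reader)
                img, pts = frame["data"], frame["pts"]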

        """
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return {"data": frame, "pts": pts}

    def __iter__(self):
        return self

    def seek(self, time_s: float):
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds

        .. note::
            The current implementation is a so-called precise seek. This
            means that, following a seek, a call to :mod:`next()` will return the
            frame with the exact timestamp, if it exists, or
            the first frame with a timestamp larger than ``time_s``.
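
            For example (assuming the container actually holds frames around the
            2 second mark)::

                frame = next(reader.seek(2.0))
                assert frame["pts"] >= 2.0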
        """
        self._c.seek(time_s)
        return self

    def get_metadata(self):
        """Returns video metadata

        Returns:
            (dict): dictionary containing duration and frame rate for every stream
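
        Example:
            The exact keys depend on the streams present in the container; for a
            file with a single video stream the result might look like this
            (values are purely illustrative)::

                {'video': {'duration': [10.9], 'fps': [29.97]}}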
        """
        return self._c.get_metadata()

    def set_current_stream(self, stream: str):
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream. Defaults to ``"video:0"``.
                Currently available stream types include ``['video', 'audio']``.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (which is determined by the video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only the stream type is passed, the decoder auto-detects the first
                stream of that type and returns it.

        Returns:
            (bool): True on success, False otherwise
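
        Example:
            Switching to the container's first audio stream (assuming the file
            actually carries an audio track)::

                reader.set_current_stream("audio:0")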
        """
        return self._c.set_current_stream(stream)


__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "ImageReadMode",
    "decode_image",
    "decode_jpeg",
    "decode_png",
    "encode_jpeg",
    "encode_png",
    "read_file",
    "read_image",
    "write_file",
    "write_jpeg",
    "write_png",
    "Video",
]