import torch
from typing import Any, Dict, Iterator

from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)
from .image import (
    ImageReadMode,
    decode_image,
    decode_jpeg,
    decode_png,
    encode_jpeg,
    encode_png,
    read_file,
    read_image,
    write_file,
    write_jpeg,
    write_png,
)


if _HAS_VIDEO_OPT:

    def _has_video_opt() -> bool:
        return True


else:

    def _has_video_opt() -> bool:
        return False


class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :class:`VideoReader` object, seeks to the
        2-second point, and returns a single frame::

            import torchvision
            video_path = "path_to_a_test_video"
            reader = torchvision.io.VideoReader(video_path, "video")
            reader.seek(2.0)
            frame = next(reader)

        :class:`VideoReader` implements the iterable API, which makes it suitable
        for use in conjunction with :mod:`itertools` for more advanced reading.
        As such, a :class:`VideoReader` instance can be used inside for loops::

            frames = []
            reader.seek(2)
            for frame in reader:
                frames.append(frame['data'])
            # additionally, `seek` implements a fluent API, so we can do
            for frame in reader.seek(2):
                frames.append(frame['data'])

        With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
        following code::

            for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
                frames.append(frame['data'])

        and similarly, reading 10 frames after the 2s timestamp can be achieved
        as follows::

            for frame in itertools.islice(reader.seek(2), 10):
                frames.append(frame['data'])

    .. note::

        Each stream descriptor consists of two parts: stream type (e.g. 'video') and
        a unique stream id (which is determined by the video encoding).
        In this way, if the video container contains multiple
        streams of the same type, users can access the one they want.
        If only the stream type is passed, the decoder auto-detects the first stream of that type.
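
        For example (an illustrative sketch; the available stream ids depend on
        how the container was encoded)::

            reader = torchvision.io.VideoReader(video_path, "video:0")  # first video stream
            reader = torchvision.io.VideoReader(video_path, "audio")    # first audio stream, auto-detected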

    Args:

        path (string): Path to the video file in a supported format

        stream (string, optional): descriptor of the required stream, optionally followed by
            the stream id, in the format ``{stream_type}:{stream_id}``. Defaults to ``"video"``,
            which selects the first video stream.
            Currently available stream types are ``['video', 'audio']``.
    """

    def __init__(self, path: str, stream: str = "video") -> None:
        if not _has_video_opt():
            raise RuntimeError(
                "Not compiled with video_reader support. "
                + "To enable video_reader support, please install "
                + "ffmpeg (version 4.2 is currently supported) and "
                + "build torchvision from source."
            )
        self._c = torch.classes.torchvision.Video(path, stream)

    def __next__(self) -> Dict[str, Any]:
        """Decodes and returns the next frame of the current stream.
        Frames are encoded as a dict with mandatory
        data and pts fields, where data is a tensor, and pts is a
        presentation timestamp of the frame expressed in seconds
        as a float.

        Returns:
            (dict): a dictionary containing the decoded frame (``data``)
            and the corresponding timestamp (``pts``) in seconds
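
        Example (an illustrative sketch, reusing the ``reader`` from the class
        docstring)::

            frame = next(reader)
            frame['data']  # torch.Tensor holding the decoded frame
            frame['pts']   # float, presentation timestamp in seconds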

        """
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return {"data": frame, "pts": pts}

    def __iter__(self) -> Iterator['VideoReader']:
        return self

    def seek(self, time_s: float) -> 'VideoReader':
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds

        .. note::
            The current implementation is the so-called precise seek. This
            means that, following a seek, a call to ``next()`` will return the
            frame with the exact timestamp if it exists, or
            the first frame with a timestamp larger than ``time_s``.
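
            For example (an illustrative sketch; the exact ``pts`` returned
            depends on the frame timestamps in the video)::

                frame = next(reader.seek(2.0))
                frame['pts']  # first timestamp at or after 2.0 seconds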
        """
        self._c.seek(time_s)
        return self

    def get_metadata(self) -> Dict[str, Any]:
        """Returns video metadata

        Returns:
            (dict): a dictionary containing the duration and frame rate for every stream
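
        Example of a possible return value (illustrative only; the exact keys
        and values depend on the streams present in the container)::

            {'video': {'duration': [10.0], 'fps': [30.0]},
             'audio': {'duration': [10.0], 'framerate': [44100.0]}}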
        """
        return self._c.get_metadata()
    def set_current_stream(self, stream: str) -> bool:
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream, in the format
                ``{stream_type}:{stream_id}``.
                Currently available stream types include ``['video', 'audio']``.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (which is determined by the video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only the stream type is passed, the decoder auto-detects the first
                stream of that type and returns it.
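
                For example (an illustrative sketch)::

                    reader.set_current_stream("audio")    # first audio stream
                    reader.set_current_stream("video:1")  # second video stream, if present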

        Returns:
            (bool): True on success, False otherwise
        """
        return self._c.set_current_stream(stream)


__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "ImageReadMode",
    "decode_image",
    "decode_jpeg",
    "decode_png",
    "encode_jpeg",
    "encode_png",
    "read_file",
    "read_image",
    "write_file",
    "write_jpeg",
    "write_png",
    "Video",
]