"tests/vscode:/vscode.git/clone" did not exist on "a27b4e436acd111391806540b37ab25706b8c6b9"
__init__.py 7.73 KB
Newer Older
1
from typing import Any, Dict, Iterator

import torch

from ..utils import _log_api_usage_once

try:
    from ._load_gpu_decoder import _HAS_VIDEO_DECODER
except ModuleNotFoundError:
    _HAS_VIDEO_DECODER = False
from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .image import (
    ImageReadMode,
    decode_image,
    decode_jpeg,
    decode_png,
    encode_jpeg,
    encode_png,
    read_file,
    read_image,
    write_file,
    write_jpeg,
    write_png,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)


if _HAS_VIDEO_OPT:

    def _has_video_opt() -> bool:
        return True


else:

    def _has_video_opt() -> bool:
        return False


class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :class:`VideoReader` object, seeks into the
        2 second point, and returns a single frame::

            import torchvision
            video_path = "path_to_a_test_video"
            reader = torchvision.io.VideoReader(video_path, "video")
            reader.seek(2.0)
            frame = next(reader)

        :class:`VideoReader` implements the iterable API, which makes it suitable for
        use in conjunction with :mod:`itertools` for more advanced reading.
        As such, we can use a :class:`VideoReader` instance inside for loops::

            frames = []
            reader.seek(2)
            for frame in reader:
                frames.append(frame['data'])
            # additionally, `seek` implements a fluent API, so we can do
            for frame in reader.seek(2):
                frames.append(frame['data'])

        With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
        following code::

            for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
                frames.append(frame['data'])

        and similarly, reading 10 frames after the 2s timestamp can be achieved
        as follows::

            for frame in itertools.islice(reader.seek(2), 10):
                frames.append(frame['data'])

    .. note::

        Each stream descriptor consists of two parts: stream type (e.g. 'video') and
        a unique stream id (which is determined by the video encoding).
        In this way, if the video container contains multiple
        streams of the same type, users can access the one they want.
        If only the stream type is passed, the decoder auto-detects the first stream of that type.
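
        For example, assuming the container also holds an audio stream, it could be
        selected explicitly with a descriptor such as (illustrative sketch)::

            reader = torchvision.io.VideoReader(video_path, "audio:0")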

    Args:
        path (string): Path to the video file in a supported format.

        stream (string, optional): descriptor of the required stream, followed by the stream id,
            in the format ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``.
            Currently available options include ``['video', 'audio']``.

        num_threads (int, optional): number of threads used by the codec to decode the video.
            The default value (0) enables multithreading with a codec-dependent heuristic.
            Performance will depend on the version of the FFmpeg codecs supported.

        device (str, optional): Device to be used for decoding. Defaults to ``"cpu"``.

    """

    def __init__(self, path: str, stream: str = "video", num_threads: int = 0, device: str = "cpu") -> None:
        _log_api_usage_once(self)
        self.is_cuda = False
        device = torch.device(device)
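        # when a CUDA device is requested, construct the GPU decoder and return early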
        if device.type == "cuda":
            if not _HAS_VIDEO_DECODER:
                raise RuntimeError("Not compiled with GPU decoder support.")
            self.is_cuda = True
            if device.index is None:
                raise RuntimeError("Invalid cuda device!")
            self._c = torch.classes.torchvision.GPUDecoder(path, device.index)
            return
        if not _has_video_opt():
            raise RuntimeError(
                "Not compiled with video_reader support, "
                + "to enable video_reader support, please install "
                + "ffmpeg (version 4.2 is currently supported) and "
                + "build torchvision from source."
            )

        self._c = torch.classes.torchvision.Video(path, stream, num_threads)

    def __next__(self) -> Dict[str, Any]:
        """Decodes and returns the next frame of the current stream.
        Frames are encoded as a dict with mandatory
        data and pts fields, where data is a tensor, and pts is a
        presentation timestamp of the frame expressed in seconds
        as a float.

        Returns:
            (dict): a dictionary containing the decoded frame (``data``)
            and its corresponding timestamp (``pts``) in seconds
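
        Example (illustrative; ``reader`` is a :class:`VideoReader` instance)::

            frame = next(reader)
            img, pts = frame["data"], frame["pts"]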

        """
        if self.is_cuda:
            frame = self._c.next()
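            # an empty tensor from the decoder signals the end of the stream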
            if frame.numel() == 0:
                raise StopIteration
            return {"data": frame}
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return {"data": frame, "pts": pts}

    def __iter__(self) -> Iterator[Dict[str, Any]]:
        return self

    def seek(self, time_s: float, keyframes_only: bool = False) -> "VideoReader":
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds
            keyframes_only (bool): if True, seek only to keyframes

        .. note::
            The current implementation performs a so-called precise seek. This
            means that following a seek, the next call to :func:`next` will return
            the frame with the exact timestamp, if it exists, or the first frame
            with a timestamp larger than ``time_s``.
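
        Example (illustrative; ``reader`` is a :class:`VideoReader` instance)::

            reader.seek(4.0)
            frame = next(reader)  # frame with pts at or just after 4.0 seconds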
        """
        if self.is_cuda:
            raise RuntimeError("seek() not yet supported with GPU decoding.")
        self._c.seek(time_s, keyframes_only)
        return self

    def get_metadata(self) -> Dict[str, Any]:
        """Returns video metadata

        Returns:
            (dict): dictionary containing duration and frame rate for every stream
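
        Example (illustrative sketch; the exact keys depend on the backend and on the
        streams present in the container)::

            md = reader.get_metadata()
            # e.g. md["video"]["fps"] and md["video"]["duration"] are lists with
            # one entry per video stream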
        """
        return self._c.get_metadata()

    def set_current_stream(self, stream: str) -> bool:
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream. Defaults to ``"video:0"``.
                Currently available stream types include ``['video', 'audio']``.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (which is determined by the video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only the stream type is passed, the decoder auto-detects the first stream
                of that type and returns it.

        Returns:
            (bool): True on success, False otherwise
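
        Example (illustrative sketch; assumes the container has an audio stream)::

            reader.set_current_stream("audio:0")
            frame = next(reader)  # subsequent frames come from the audio stream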
        """
        if self.is_cuda:
            print("GPU decoding only works with video stream.")
        return self._c.set_current_stream(stream)


__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_HAS_VIDEO_DECODER",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "ImageReadMode",
    "decode_image",
    "decode_jpeg",
    "decode_png",
    "encode_jpeg",
    "encode_png",
    "read_file",
    "read_image",
    "write_file",
    "write_jpeg",
    "write_png",
    "Video",
]