from typing import Any, Dict, Iterator

import torch

from ..utils import _log_api_usage_once
from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_DECODER,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .image import (
    ImageReadMode,
    decode_image,
    decode_jpeg,
    decode_png,
    encode_jpeg,
    encode_png,
    read_file,
    read_image,
    write_file,
    write_jpeg,
    write_png,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)


if _HAS_VIDEO_OPT:

    def _has_video_opt() -> bool:
        return True


else:

    def _has_video_opt() -> bool:
        return False


class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :class:`VideoReader` object, seeks to the
        2-second mark, and returns a single frame::

            import torchvision
            video_path = "path_to_a_test_video"
            reader = torchvision.io.VideoReader(video_path, "video")
            reader.seek(2.0)
            frame = next(reader)

        :class:`VideoReader` implements the iterable API, which makes it suitable for
        use in conjunction with :mod:`itertools` for more advanced reading.
        As such, we can use a :class:`VideoReader` instance inside for loops::

            reader.seek(2)
            for frame in reader:
                frames.append(frame['data'])
            # additionally, `seek` implements a fluent API, so we can do
            for frame in reader.seek(2):
                frames.append(frame['data'])

        With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the
        following code::

            for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)):
                frames.append(frame['data'])

        and similarly, reading 10 frames after the 2s timestamp can be achieved
        as follows::

            for frame in itertools.islice(reader.seek(2), 10):
                frames.append(frame['data'])

    .. note::

        Each stream descriptor consists of two parts: stream type (e.g. 'video') and
        a unique stream id (which is determined by the video encoding).
        In this way, if the video container contains multiple
        streams of the same type, users can access the one they want.
        If only the stream type is passed, the decoder auto-detects the first stream of that type.
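
        For example, assuming the container exposes a second video stream
        (a hypothetical ``video:1``), it could be selected explicitly::

            reader = torchvision.io.VideoReader(video_path, "video:1")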

    Args:

        path (string): Path to the video file in a supported format

        stream (string, optional): descriptor of the required stream, followed by the stream id,
            in the format ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``.
            Currently available options include ``['video', 'audio']``

        num_threads (int, optional): number of threads used by the codec to decode video.
            Default value (0) enables multithreading with a codec-dependent heuristic. Performance
            will depend on the version of the FFmpeg codecs supported.

        device (str, optional): Device to be used for decoding. Defaults to ``"cpu"``.
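
    If torchvision was built with the GPU video decoder, decoding can run on a
    CUDA device; a minimal sketch (an explicit device index is required)::

        reader = torchvision.io.VideoReader(video_path, device="cuda:0")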

    """

    def __init__(self, path: str, stream: str = "video", num_threads: int = 0, device: str = "cpu") -> None:
        _log_api_usage_once(self)
        self.is_cuda = False
        device = torch.device(device)
        if device.type == "cuda":
            if not _HAS_VIDEO_DECODER:
                raise RuntimeError("Not compiled with GPU decoder support.")
            self.is_cuda = True
            if device.index is None:
                raise RuntimeError("Invalid cuda device!")
            self._c = torch.classes.torchvision.GPUDecoder(path, device.index)
            return
        if not _has_video_opt():
            raise RuntimeError(
                "Not compiled with video_reader support, "
                + "to enable video_reader support, please install "
                + "ffmpeg (version 4.2 is currently supported) and "
                + "build torchvision from source."
            )

        self._c = torch.classes.torchvision.Video(path, stream, num_threads)

    def __next__(self) -> Dict[str, Any]:
        """Decodes and returns the next frame of the current stream.
        Frames are encoded as a dict with mandatory
        data and pts fields, where data is a tensor, and pts is a
        presentation timestamp of the frame expressed in seconds
        as a float.

        Returns:
            (dict): a dictionary containing the decoded frame (``data``)
            and the corresponding timestamp (``pts``) in seconds
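
        A minimal usage sketch (note that when decoding on the GPU only ``data``
        is returned, without ``pts``)::

            frame = next(reader)
            data, pts = frame["data"], frame["pts"]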

        """
        if self.is_cuda:
            frame = self._c.next()
            if frame.numel() == 0:
                raise StopIteration
            return {"data": frame}
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return {"data": frame, "pts": pts}

    def __iter__(self) -> Iterator[Dict[str, Any]]:
        return self

    def seek(self, time_s: float, keyframes_only: bool = False) -> "VideoReader":
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds
            keyframes_only (bool): allow seeking only to keyframes

        .. note::
            The current implementation performs a so-called precise seek. This
            means that following a seek, a call to :func:`next` will return the
            frame with the exact timestamp, if it exists, or
            the first frame with a timestamp larger than ``time_s``.
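
        A minimal sketch of a coarser but faster seek to the nearest keyframe
        (the exact frame returned depends on how the video was encoded)::

            reader.seek(2.0, keyframes_only=True)
            frame = next(reader)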
        """
        if self.is_cuda:
            raise RuntimeError("seek() not yet supported with GPU decoding.")
        self._c.seek(time_s, keyframes_only)
        return self

    def get_metadata(self) -> Dict[str, Any]:
        """Returns video metadata

        Returns:
            (dict): dictionary containing duration and frame rate for every stream
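
        A usage sketch; the exact keys depend on the streams present in the
        container (a ``"video"`` entry with ``duration`` and ``fps`` lists is typical)::

            md = reader.get_metadata()
            duration, fps = md["video"]["duration"], md["video"]["fps"]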
        """
        if self.is_cuda:
            raise RuntimeError("get_metadata() not yet supported with GPU decoding.")
        return self._c.get_metadata()

    def set_current_stream(self, stream: str) -> bool:
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream. Defaults to ``"video:0"``
                Currently available stream types include ``['video', 'audio']``.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (which is determined by the video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only the stream type is passed, the decoder auto-detects the first stream
                of that type and returns it.

        Returns:
            (bool): True on success, False otherwise
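
        A minimal usage sketch (it assumes the container actually contains an
        audio stream)::

            reader.set_current_stream("audio:0")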
        """
        if self.is_cuda:
            print("GPU decoding only works with video stream.")
        return self._c.set_current_stream(stream)

    def _reformat(self, tensor, output_format: str = "yuv420"):
        # Reformat a decoded frame tensor via the underlying decoder;
        # only "yuv420" is currently supported.
        supported_formats = [
            "yuv420",
        ]
        if output_format not in supported_formats:
            raise RuntimeError(f"{output_format} not supported, please use one of {', '.join(supported_formats)}")
        if not isinstance(tensor, torch.Tensor):
            raise RuntimeError("Expected tensor as input parameter!")
        return self._c.reformat(tensor.cpu())


__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_HAS_VIDEO_DECODER",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "ImageReadMode",
    "decode_image",
    "decode_jpeg",
    "decode_png",
    "encode_jpeg",
    "encode_png",
    "read_file",
    "read_image",
    "write_file",
    "write_jpeg",
    "write_png",
    "Video",
]