import torch

# Low-level (underscore-prefixed) video decoding primitives plus the metadata
# containers they return; _HAS_VIDEO_OPT flags whether the compiled video
# backend is available (used below to conditionally define `Video`).
from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
# High-level video I/O helpers.
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)
# Image encoding/decoding helpers.
from .image import (
    read_image,
    decode_image,
    encode_jpeg,
    write_jpeg,
    encode_png,
    write_png,
)

if _HAS_VIDEO_OPT:

    class Video:
        """
        Fine-grained video-reading API.
        Supports frame-by-frame reading of various streams from a single video
        container.

        Args:

            path (string): Path to the video file in supported format

            stream (string, optional): descriptor of the required stream. Defaults to "video:0"
                Currently available options include :mod:`['video', 'audio', 'cc', 'sub']`

        Example:
            The following example creates a :mod:`Video` object, seeks into the 2s
            point, and returns a single frame::
                    import torchvision
                    video_path = "path_to_a_test_video"

                    reader = torchvision.io.Video(video_path, "video")
                    reader.seek(2.0)
                    frame, timestamp = reader.next()
        """

        def __init__(self, path: str, stream: str = "video"):
            # All work is delegated to the C++ torchbind class
            # torch.classes.torchvision.Video, which is only registered when
            # the video backend was compiled in (guarded by _HAS_VIDEO_OPT).
            self._c = torch.classes.torchvision.Video(path, stream)

        def next(self):
            """Iterator that decodes the next frame of the current stream

            Returns:
                ([torch.Tensor, float]): list containing decoded frame and corresponding timestamp

            """
            return self._c.next()

        def seek(self, time_s: float) -> None:
            """Seek within current stream.

            Args:
                time_s (float): seek time in seconds

            .. note::
                Current implementation is the so-called precise seek. This
                means following seek, call to :mod:`next()` will return the
                frame with the exact timestamp if it exists or
                the first frame with timestamp larger than time_s.
            """
            self._c.seek(time_s)

        def get_metadata(self) -> dict:
            """Returns video metadata

            Returns:
                (dict): dictionary containing duration and frame rate for every stream
            """
            return self._c.get_metadata()

        def set_current_stream(self, stream: str) -> bool:
            """Set current stream.
            Explicitly define the stream we are operating on.

            Args:
                stream (string): descriptor of the required stream. Defaults to "video:0"
                    Currently available stream types include :mod:`['video', 'audio', 'cc', 'sub']`.
                    Each descriptor consists of two parts: stream type (e.g. 'video') and
                    a unique stream id (which are determined by video encoding).
                    In this way, if the video container contains multiple
                    streams of the same type, users can access the one they want.
                    If only stream type is passed, the decoder auto-detects first stream
                    of that type and returns it.

            Returns:
                (bool): True on success, False otherwise
            """
            return self._c.set_current_stream(stream)


else:
    # Video backend not compiled in: expose a placeholder so
    # `from torchvision.io import Video` still succeeds.
    Video = None


# Public API of torchvision.io. Underscore-prefixed entries are exported on
# purpose (legacy low-level video primitives); keep the list in sync with the
# imports above.
__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "read_image",
    "decode_image",
    "encode_jpeg",
    "write_jpeg",
    "encode_png",
    "write_png",
    "Video",
]