"scripts/deprecated/test_openai_server.py" did not exist on "e4d3333c6c9841f139222ea675a4f29241362f49"
__init__.py 4.05 KB
Newer Older
1
2
import torch

from ._video_opt import (
    Timebase,
    VideoMetaData,
    _HAS_VIDEO_OPT,
    _probe_video_from_file,
    _probe_video_from_memory,
    _read_video_from_file,
    _read_video_from_memory,
    _read_video_timestamps_from_file,
    _read_video_timestamps_from_memory,
)
from .video import (
    read_video,
    read_video_timestamps,
    write_video,
)
from .image import (
    read_image,
    decode_image,
    encode_jpeg,
    write_jpeg,
    encode_png,
    write_png,
)


if _HAS_VIDEO_OPT:
    def _has_video_opt():
        return True
else:
    def _has_video_opt():
        return False


class VideoReader:
    """
    Fine-grained video-reading API.
    Supports frame-by-frame reading of various streams from a single video
    container.

    Example:
        The following example creates a :mod:`VideoReader` object, seeks to the
        2-second point, and returns a single frame::
                import torchvision
                video_path = "path_to_a_test_video"

                reader = torchvision.io.VideoReader(video_path, "video")
                reader.seek(2.0)
                frame, timestamp = next(reader)

    Args:

        path (string): Path to the video file in a supported format

        stream (string, optional): descriptor of the required stream. Defaults to "video:0".
            Currently available options include :mod:`['video', 'audio', 'cc', 'sub']`.
    """

    def __init__(self, path, stream="video"):
        if not _has_video_opt():
            raise RuntimeError("Not compiled with video_reader support")
        self._c = torch.classes.torchvision.Video(path, stream)
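
    # Illustrative sketch (not part of the module): construction raises a
    # RuntimeError when torchvision was built without the video_reader backend,
    # so callers can check the exported _HAS_VIDEO_OPT flag first.
    # "video.mp4" is a hypothetical local path.
    #
    #     import torchvision
    #     if torchvision.io._HAS_VIDEO_OPT:
    #         reader = torchvision.io.VideoReader("video.mp4", "video")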

    def __next__(self):
        """Decodes and returns the next frame of the current stream

        Returns:
            ([torch.Tensor, float]): list containing decoded frame and corresponding timestamp

        """
        frame, pts = self._c.next()
        if frame.numel() == 0:
            raise StopIteration
        return frame, pts

    def __iter__(self):
        return self
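
    # Illustrative sketch: since the class implements __iter__/__next__, frames
    # of the current stream can be consumed with a plain for-loop until
    # StopIteration. "video.mp4" is a hypothetical local path.
    #
    #     reader = torchvision.io.VideoReader("video.mp4", "video")
    #     for frame, pts in reader:
    #         ...  # frame: torch.Tensor (decoded frame), pts: float (seconds)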

    def seek(self, time_s: float):
        """Seek within current stream.

        Args:
            time_s (float): seek time in seconds

        .. note::
            The current implementation performs a so-called precise seek. This
            means that, following a seek, a call to :mod:`next()` will return
            the frame with the exact timestamp if it exists, or the first
            frame with a timestamp larger than time_s.
        """
        self._c.seek(time_s)
        return self
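
    # Illustrative sketch of the precise-seek behaviour described above
    # ("video.mp4" is a hypothetical local path): after seek(2.0), the next
    # decoded frame has the exact timestamp 2.0 s if such a frame exists,
    # otherwise the first timestamp greater than 2.0 s.
    #
    #     reader = torchvision.io.VideoReader("video.mp4", "video").seek(2.0)
    #     frame, pts = next(reader)
    #     assert pts >= 2.0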

    def get_metadata(self):
        """Returns video metadata

        Returns:
            (dict): dictionary containing duration and frame rate for every stream
        """
        return self._c.get_metadata()
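
    # Illustrative sketch ("video.mp4" is a hypothetical path, and the exact
    # key names below are an assumption rather than a documented contract):
    # the metadata dictionary is keyed by stream type and reports duration and
    # frame rate for each stream of that type.
    #
    #     md = torchvision.io.VideoReader("video.mp4", "video").get_metadata()
    #     # e.g. {'video': {'duration': [10.0], 'fps': [30.0]}, 'audio': {...}}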

    def set_current_stream(self, stream: str):
        """Set current stream.
        Explicitly define the stream we are operating on.

        Args:
            stream (string): descriptor of the required stream. Defaults to "video:0".
                Currently available stream types include :mod:`['video', 'audio', 'cc', 'sub']`.
                Each descriptor consists of two parts: stream type (e.g. 'video') and
                a unique stream id (determined by the video encoding).
                In this way, if the video container contains multiple
                streams of the same type, users can access the one they want.
                If only the stream type is passed, the decoder auto-detects the first
                stream of that type and returns it.

        Returns:
            (bool): True on success, False otherwise
        """
        return self._c.set_current_stream(stream)
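
    # Illustrative sketch (assumes the container actually holds an audio
    # stream; "video.mp4" is a hypothetical path): switch decoding to the
    # first audio stream, then read the next audio chunk.
    #
    #     reader = torchvision.io.VideoReader("video.mp4", "video")
    #     if reader.set_current_stream("audio:0"):
    #         chunk, pts = next(reader)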


__all__ = [
    "write_video",
    "read_video",
    "read_video_timestamps",
    "_read_video_from_file",
    "_read_video_timestamps_from_file",
    "_probe_video_from_file",
    "_read_video_from_memory",
    "_read_video_timestamps_from_memory",
    "_probe_video_from_memory",
    "_HAS_VIDEO_OPT",
    "_read_video_clip_from_memory",
    "_read_video_meta_data",
    "VideoMetaData",
    "Timebase",
    "read_image",
    "decode_image",
    "encode_jpeg",
    "write_jpeg",
    "encode_png",
    "write_png",
    "Video",
]