import bisect
import math
from fractions import Fraction

import torch
from torchvision.io import (
    _read_video_timestamps_from_file,
    _read_video_from_file,
)
from torchvision.io import read_video_timestamps, read_video

from .utils import tqdm

def pts_convert(pts, timebase_from, timebase_to, round_func=math.floor):
    """convert pts between different time bases
    Args:
        pts: presentation timestamp, float
        timebase_from: original timebase. Fraction
        timebase_to: new timebase. Fraction
        round_func: rounding function.
    """
    new_pts = Fraction(pts, 1) * timebase_from / timebase_to
    return round_func(new_pts)

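# Example (illustrative): pts=1024 in a 1/12800 video timebase corresponds to
# 0.08s, i.e. 3528 ticks in a 1/44100 audio timebase:
#
#   pts_convert(1024, Fraction(1, 12800), Fraction(1, 44100))  # -> 3528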

def unfold(tensor, size, step, dilation=1):
    """
    similar to tensor.unfold, but with the dilation
    and specialized for 1d tensors

    Returns all consecutive windows of `size` elements, with
    `step` between windows. The distance between each element
    in a window is given by `dilation`.
    """
    assert tensor.dim() == 1
    o_stride = tensor.stride(0)
    numel = tensor.numel()
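    # each window spans dilation * (size - 1) + 1 elements, so the number of
    # windows that fit is (numel - span) // step + 1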
    new_stride = (step * o_stride, dilation * o_stride)
    new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size)
    if new_size[0] < 1:
        new_size = (0, size)
    return torch.as_strided(tensor, new_size, new_stride)

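# Example (illustrative):
#
#   unfold(torch.arange(10), size=3, step=2)
#   # -> tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]])
#   unfold(torch.arange(10), size=3, step=2, dilation=2)
#   # -> tensor([[0, 2, 4], [2, 4, 6], [4, 6, 8]])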

class VideoClips(object):
    """
    Given a list of video files, computes all consecutive subvideos of size
    `clip_length_in_frames`, where the distance between each subvideo in the
    same video is defined by `frames_between_clips`.
    If `frame_rate` is specified, it will also resample all the videos to have
    the same frame rate, and the clips will refer to this frame rate.

    Creating this instance the first time is time-consuming, as it needs to
    decode all the videos in `video_paths`. It is recommended that you
    cache the results after instantiation of the class.

    Recreating the clips for different clip lengths is fast, and can be done
    with the `compute_clips` method.

    Arguments:
        video_paths (List[str]): paths to the video files
        clip_length_in_frames (int): size of a clip in number of frames
        frames_between_clips (int): step (in frames) between each clip
        frame_rate (int, optional): if specified, it will resample the video
            so that it has `frame_rate`, and then the clips will be defined
            on the resampled video
        num_workers (int): how many subprocesses to use for data loading.
            0 means that the data will be loaded in the main process. (default: 0)
    """
    def __init__(self, video_paths, clip_length_in_frames=16, frames_between_clips=1,
                 frame_rate=None, _precomputed_metadata=None, num_workers=0, _backend="pyav"):
        self.video_paths = video_paths
        self.num_workers = num_workers
        self._backend = _backend

        if _precomputed_metadata is None:
            self._compute_frame_pts()
        else:
            self._init_from_metadata(_precomputed_metadata)
        self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate)

    def _compute_frame_pts(self):
        self.video_pts = []
        if self._backend == "pyav":
            self.video_fps = []
        else:
            self.info = []

        # strategy: use a DataLoader to parallelize read_video_timestamps,
        # so we need to create a dummy dataset first
        class DS(object):
            def __init__(self, x, _backend):
                self.x = x
                self._backend = _backend

            def __len__(self):
                return len(self.x)

            def __getitem__(self, idx):
                if self._backend == "pyav":
                    return read_video_timestamps(self.x[idx])
                else:
                    return _read_video_timestamps_from_file(self.x[idx])

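        # the identity collate_fn below keeps each video's variable-length
        # timestamp list as-is instead of trying to stack the batch into a tensor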
        import torch.utils.data
        dl = torch.utils.data.DataLoader(
            DS(self.video_paths, self._backend),
            batch_size=16,
            num_workers=self.num_workers,
            collate_fn=lambda x: x)

        with tqdm(total=len(dl)) as pbar:
            for batch in dl:
                pbar.update(1)
                if self._backend == "pyav":
                    clips, fps = list(zip(*batch))
                    clips = [torch.as_tensor(c) for c in clips]
                    self.video_pts.extend(clips)
                    self.video_fps.extend(fps)
                else:
                    video_pts, _audio_pts, info = list(zip(*batch))
                    video_pts = [torch.as_tensor(c) for c in video_pts]
                    self.video_pts.extend(video_pts)
                    self.info.extend(info)

    def _init_from_metadata(self, metadata):
        self.video_paths = metadata["video_paths"]
        assert len(self.video_paths) == len(metadata["video_pts"])
        self.video_pts = metadata["video_pts"]

        if self._backend == "pyav":
            assert len(self.video_paths) == len(metadata["video_fps"])
            self.video_fps = metadata["video_fps"]
        else:
            assert len(self.video_paths) == len(metadata["info"])
            self.info = metadata["info"]

    @property
    def metadata(self):
        _metadata = {
            "video_paths": self.video_paths,
            "video_pts": self.video_pts,
        }
        if self._backend == "pyav":
            _metadata.update({"video_fps": self.video_fps})
        else:
            _metadata.update({"info": self.info})
        return _metadata

    def subset(self, indices):
        video_paths = [self.video_paths[i] for i in indices]
        video_pts = [self.video_pts[i] for i in indices]
        if self._backend == "pyav":
            video_fps = [self.video_fps[i] for i in indices]
        else:
            info = [self.info[i] for i in indices]
        metadata = {
            "video_paths": video_paths,
            "video_pts": video_pts,
        }
        if self._backend == "pyav":
            metadata.update({"video_fps": video_fps})
        else:
            metadata.update({"info": info})
        return type(self)(video_paths, self.num_frames, self.step, self.frame_rate,
                          _precomputed_metadata=metadata, num_workers=self.num_workers,
                          _backend=self._backend)

    @staticmethod
    def compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate):
        if fps is None:
            # if for some reason the video doesn't have an fps (e.g. because it
            # has no video stream), set the fps to 1. The value doesn't matter,
            # because video_pts is empty anyway
            fps = 1
        if frame_rate is None:
            frame_rate = fps
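        # number of frames the video will contain once resampled to frame_rate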
        total_frames = len(video_pts) * (float(frame_rate) / fps)
        idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
        video_pts = video_pts[idxs]
        clips = unfold(video_pts, num_frames, step)
        if isinstance(idxs, slice):
            idxs = [idxs] * len(clips)
        else:
            idxs = unfold(idxs, num_frames, step)
        return clips, idxs

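    # Example (illustrative): 10 frames at 30 fps with pts spaced 512 apart,
    # cut into 4-frame clips starting every 2 frames:
    #
    #   clips, idxs = VideoClips.compute_clips_for_video(
    #       torch.arange(10) * 512, num_frames=4, step=2, fps=30, frame_rate=30)
    #   # clips.shape == (4, 4); clips[0] is tensor([0, 512, 1024, 1536])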
    def compute_clips(self, num_frames, step, frame_rate=None):
        """
        Compute all consecutive sequences of clips from video_pts.
        Always returns clips of size `num_frames`, meaning that the
        last few frames in a video can potentially be dropped.

        Arguments:
            num_frames (int): number of frames for the clip
            step (int): distance between two clips
            dilation (int): distance between two consecutive frames
                in a clip
        """
        self.num_frames = num_frames
        self.step = step
        self.frame_rate = frame_rate
        self.clips = []
        self.resampling_idxs = []
        if self._backend == "pyav":
            for video_pts, fps in zip(self.video_pts, self.video_fps):
                clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
                self.clips.append(clips)
                self.resampling_idxs.append(idxs)
        else:
            for video_pts, info in zip(self.video_pts, self.info):
                clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, info["video_fps"], frame_rate)
                self.clips.append(clips)
                self.resampling_idxs.append(idxs)
        clip_lengths = torch.as_tensor([len(v) for v in self.clips])
        self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
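        # e.g. clip_lengths == tensor([5, 3]) gives cumulative_sizes == [5, 8],
        # which get_clip_location() bisects to map a flat clip index back to a
        # (video_idx, clip_idx) pair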

    def __len__(self):
        return self.num_clips()

    def num_videos(self):
        return len(self.video_paths)

    def num_clips(self):
        """
        Number of subclips that are available in the video list.
        """
        return self.cumulative_sizes[-1]

    def get_clip_location(self, idx):
        """
        Converts a flattened representation of the indices into a video_idx, clip_idx
        representation.
        """
        video_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if video_idx == 0:
            clip_idx = idx
        else:
            clip_idx = idx - self.cumulative_sizes[video_idx - 1]
        return video_idx, clip_idx
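    # Example (illustrative): with cumulative_sizes == [5, 8] (5 clips in
    # video 0 and 3 clips in video 1):
    #
    #   self.get_clip_location(4)  # -> (0, 4), the last clip of video 0
    #   self.get_clip_location(5)  # -> (1, 0), the first clip of video 1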

    @staticmethod
    def _resample_video_idx(num_frames, original_fps, new_fps):
        step = float(original_fps) / new_fps
        if step.is_integer():
            # optimization: if step is integer, don't need to perform
            # advanced indexing
            step = int(step)
            return slice(None, None, step)
        idxs = torch.arange(num_frames, dtype=torch.float32) * step
        idxs = idxs.floor().to(torch.int64)
        return idxs
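    # Example (illustrative):
    #
    #   VideoClips._resample_video_idx(6, original_fps=30, new_fps=15)
    #   # -> slice(None, None, 2)
    #   VideoClips._resample_video_idx(4, original_fps=30, new_fps=20)
    #   # -> tensor([0, 1, 3, 4])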

    def get_clip(self, idx):
        """
        Gets a subclip from a list of videos.

        Arguments:
            idx (int): index of the subclip. Must be between 0 and num_clips() - 1.

        Returns:
            video (Tensor)
            audio (Tensor)
            info (Dict)
            video_idx (int): index of the video in `video_paths`
        """
        if idx >= self.num_clips():
            raise IndexError("Index {} out of range "
                             "({} number of clips)".format(idx, self.num_clips()))
        video_idx, clip_idx = self.get_clip_location(idx)
        video_path = self.video_paths[video_idx]
        clip_pts = self.clips[video_idx][clip_idx]

        if self._backend == "pyav":
            start_pts = clip_pts[0].item()
            end_pts = clip_pts[-1].item()
            video, audio, info = read_video(video_path, start_pts, end_pts)
        else:
            info = self.info[video_idx]

            video_start_pts = clip_pts[0].item()
            video_end_pts = clip_pts[-1].item()

            audio_start_pts, audio_end_pts = 0, -1
            audio_timebase = Fraction(0, 1)
            if "audio_timebase" in info:
                audio_timebase = info["audio_timebase"]
                audio_start_pts = pts_convert(
                    video_start_pts,
                    info["video_timebase"],
                    info["audio_timebase"],
                    math.floor,
                )
                audio_end_pts = pts_convert(
                    video_end_pts,
                    info["video_timebase"],
                    info["audio_timebase"],
                    math.ceil,
                )
            video, audio, info = _read_video_from_file(
                video_path,
                video_pts_range=(video_start_pts, video_end_pts),
                video_timebase=info["video_timebase"],
                audio_pts_range=(audio_start_pts, audio_end_pts),
                audio_timebase=audio_timebase,
            )
        if self.frame_rate is not None:
            resampling_idx = self.resampling_idxs[video_idx][clip_idx]
            if isinstance(resampling_idx, torch.Tensor):
                resampling_idx = resampling_idx - resampling_idx[0]
            video = video[resampling_idx]
            info["video_fps"] = self.frame_rate
        assert len(video) == self.num_frames, "{} x {}".format(video.shape, self.num_frames)
        return video, audio, info, video_idx
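

# Illustrative usage (a sketch; the video paths below are hypothetical):
#
#   video_clips = VideoClips(["a.mp4", "b.mp4"], clip_length_in_frames=16,
#                            frames_between_clips=4, frame_rate=15)
#   video, audio, info, video_idx = video_clips.get_clip(0)
#   # video is a (16, H, W, C) uint8 tensor of the clip's frames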