Commit 78743740 authored by Zhicheng Yan, committed by Francisco Massa

add _backend argument to __init__() of class VideoClips (#1363)

* add _backend argument to __init__() of class VideoClips

* minor fix

* minor fix

* Make backend private in VideoClips

* Fix lint
parent 64917bcc
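
The change below threads a private `_backend` switch through torchvision's video stack: a module-level setting in the package `__init__`, a `_backend` keyword on `VideoClips`, and backend-dispatching branches in the tests. As a quick orientation, here is a minimal sketch of the call pattern the new tests use; `video_list` is a stand-in for a list of paths to real video files:

from torchvision import get_video_backend, set_video_backend
from torchvision.datasets.video_utils import VideoClips

set_video_backend("pyav")  # or "video_reader" for the native C++ decoder
_backend = get_video_backend()

# 16-frame clips, a new clip every 4 frames; _backend is private and may change
clips = VideoClips(video_list, clip_length_in_frames=16, frames_between_clips=4,
                   _backend=_backend)
video, audio, info, video_idx = clips.get_clip(0)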
@@ -6,6 +6,7 @@ import unittest
 from torchvision import io
 from torchvision.datasets.video_utils import VideoClips, unfold
+from torchvision import get_video_backend
 from common_utils import get_tmp_dir
@@ -61,22 +62,23 @@ class Tester(unittest.TestCase):
     @unittest.skipIf(not io.video._av_available(), "this test requires av")
     @unittest.skipIf('win' in sys.platform, 'temporarily disabled on Windows')
     def test_video_clips(self):
+        _backend = get_video_backend()
         with get_list_of_videos(num_videos=3) as video_list:
-            video_clips = VideoClips(video_list, 5, 5)
+            video_clips = VideoClips(video_list, 5, 5, _backend=_backend)
             self.assertEqual(video_clips.num_clips(), 1 + 2 + 3)
             for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]):
                 video_idx, clip_idx = video_clips.get_clip_location(i)
                 self.assertEqual(video_idx, v_idx)
                 self.assertEqual(clip_idx, c_idx)
-            video_clips = VideoClips(video_list, 6, 6)
+            video_clips = VideoClips(video_list, 6, 6, _backend=_backend)
             self.assertEqual(video_clips.num_clips(), 0 + 1 + 2)
             for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]):
                 video_idx, clip_idx = video_clips.get_clip_location(i)
                 self.assertEqual(video_idx, v_idx)
                 self.assertEqual(clip_idx, c_idx)
-            video_clips = VideoClips(video_list, 6, 1)
+            video_clips = VideoClips(video_list, 6, 1, _backend=_backend)
             self.assertEqual(video_clips.num_clips(), 0 + (10 - 6 + 1) + (15 - 6 + 1))
             for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]:
                 video_idx, clip_idx = video_clips.get_clip_location(i)
@@ -85,8 +87,9 @@ class Tester(unittest.TestCase):
     @unittest.skip("Moved to reference scripts for now")
     def test_video_sampler(self):
+        _backend = get_video_backend()
         with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list:
-            video_clips = VideoClips(video_list, 5, 5)
+            video_clips = VideoClips(video_list, 5, 5, _backend=_backend)
             sampler = RandomClipSampler(video_clips, 3)  # noqa: F821
             self.assertEqual(len(sampler), 3 * 3)
             indices = torch.tensor(list(iter(sampler)))
@@ -97,8 +100,9 @@ class Tester(unittest.TestCase):
     @unittest.skip("Moved to reference scripts for now")
     def test_video_sampler_unequal(self):
+        _backend = get_video_backend()
         with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list:
-            video_clips = VideoClips(video_list, 5, 5)
+            video_clips = VideoClips(video_list, 5, 5, _backend=_backend)
             sampler = RandomClipSampler(video_clips, 3)  # noqa: F821
             self.assertEqual(len(sampler), 2 + 3 + 3)
             indices = list(iter(sampler))
@@ -116,10 +120,11 @@ class Tester(unittest.TestCase):
     @unittest.skipIf(not io.video._av_available(), "this test requires av")
     @unittest.skipIf('win' in sys.platform, 'temporarily disabled on Windows')
     def test_video_clips_custom_fps(self):
+        _backend = get_video_backend()
         with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list:
             num_frames = 4
             for fps in [1, 3, 4, 10]:
-                video_clips = VideoClips(video_list, num_frames, num_frames, fps)
+                video_clips = VideoClips(video_list, num_frames, num_frames, fps, _backend=_backend)
                 for i in range(video_clips.num_clips()):
                     video, audio, info, video_idx = video_clips.get_clip(i)
                     self.assertEqual(video.shape[0], num_frames)
...
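
The hunks above appear to touch the VideoClips test module. The expected totals in the num_clips() asserts follow from simple arithmetic: a video of n frames yields max(0, (n - length) // step + 1) clips. A small sanity-check helper (not part of the test suite), assuming the three temporary videos hold 5, 10 and 15 frames, which is what the asserts imply:

def expected_num_clips(num_frames, length, step):
    # floor((frames - length) / step) + 1, clamped at zero
    if num_frames < length:
        return 0
    return (num_frames - length) // step + 1

sizes = [5, 10, 15]
assert sum(expected_num_clips(n, 5, 5) for n in sizes) == 1 + 2 + 3
assert sum(expected_num_clips(n, 6, 6) for n in sizes) == 0 + 1 + 2
assert sum(expected_num_clips(n, 6, 1) for n in sizes) == 0 + (10 - 6 + 1) + (15 - 6 + 1)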
@@ -4,6 +4,7 @@ import tempfile
 import torch
 import torchvision.datasets.utils as utils
 import torchvision.io as io
+from torchvision import get_video_backend
 import unittest
 import sys
 import warnings
@@ -22,6 +23,20 @@ try:
 except ImportError:
     av = None

+_video_backend = get_video_backend()
+
+
+def _read_video(filename, start_pts=0, end_pts=None):
+    if _video_backend == "pyav":
+        return io.read_video(filename, start_pts, end_pts)
+    else:
+        if end_pts is None:
+            end_pts = -1
+        return io._read_video_from_file(
+            filename,
+            video_pts_range=(start_pts, end_pts),
+        )
+

 def _create_video_frames(num_frames, height, width):
     y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width))
@@ -44,7 +59,12 @@ def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None,
         options = {'crf': '0'}
     if video_codec is None:
-        video_codec = 'libx264'
+        if _video_backend == "pyav":
+            video_codec = 'libx264'
+        else:
+            # when video_codec is not set, we assume it is libx264rgb which accepts
+            # RGB pixel formats as input instead of YUV
+            video_codec = 'libx264rgb'
     if options is None:
         options = {}
@@ -63,15 +83,16 @@ class Tester(unittest.TestCase):
     def test_write_read_video(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
-            lv, _, info = io.read_video(f_name)
+            lv, _, info = _read_video(f_name)
             self.assertTrue(data.equal(lv))
             self.assertEqual(info["video_fps"], 5)

     def test_read_timestamps(self):
         with temp_video(10, 300, 300, 5) as (f_name, data):
-            pts, _ = io.read_video_timestamps(f_name)
+            if _video_backend == "pyav":
+                pts, _ = io.read_video_timestamps(f_name)
+            else:
+                pts, _, _ = io._read_video_timestamps_from_file(f_name)
             # note: not all formats/codecs provide accurate information for computing the
             # timestamps. For the format that we use here, this information is available,
             # so we use it as a baseline
@@ -85,26 +106,35 @@ class Tester(unittest.TestCase):
     def test_read_partial_video(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
-            pts, _ = io.read_video_timestamps(f_name)
+            if _video_backend == "pyav":
+                pts, _ = io.read_video_timestamps(f_name)
+            else:
+                pts, _, _ = io._read_video_timestamps_from_file(f_name)
             for start in range(5):
                 for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
+                    lv, _, _ = _read_video(f_name, pts[start], pts[start + l - 1])
                     s_data = data[start:(start + l)]
                     self.assertEqual(len(lv), l)
                     self.assertTrue(s_data.equal(lv))

-            lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
-            self.assertEqual(len(lv), 4)
-            self.assertTrue(data[4:8].equal(lv))
+            if _video_backend == "pyav":
+                # for "video_reader" backend, we don't decode the closest early frame
+                # when the given start pts is not matching any frame pts
+                lv, _, _ = _read_video(f_name, pts[4] + 1, pts[7])
+                self.assertEqual(len(lv), 4)
+                self.assertTrue(data[4:8].equal(lv))

     def test_read_partial_video_bframes(self):
         # do not use lossless encoding, to test the presence of B-frames
         options = {'bframes': '16', 'keyint': '10', 'min-keyint': '4'}
         with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
-            pts, _ = io.read_video_timestamps(f_name)
+            if _video_backend == "pyav":
+                pts, _ = io.read_video_timestamps(f_name)
+            else:
+                pts, _, _ = io._read_video_timestamps_from_file(f_name)
             for start in range(0, 80, 20):
                 for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
+                    lv, _, _ = _read_video(f_name, pts[start], pts[start + l - 1])
                     s_data = data[start:(start + l)]
                     self.assertEqual(len(lv), l)
                     self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)
@@ -120,7 +150,12 @@ class Tester(unittest.TestCase):
         url = "https://download.pytorch.org/vision_tests/io/" + name
         try:
             utils.download_url(url, temp_dir)
-            pts, fps = io.read_video_timestamps(f_name)
+            if _video_backend == "pyav":
+                pts, fps = io.read_video_timestamps(f_name)
+            else:
+                pts, _, info = io._read_video_timestamps_from_file(f_name)
+                fps = info["video_fps"]
             self.assertEqual(pts, sorted(pts))
             self.assertEqual(fps, 30)
         except URLError:
@@ -130,8 +165,10 @@ class Tester(unittest.TestCase):
     def test_read_timestamps_from_packet(self):
         with temp_video(10, 300, 300, 5, video_codec='mpeg4') as (f_name, data):
-            pts, _ = io.read_video_timestamps(f_name)
+            if _video_backend == "pyav":
+                pts, _ = io.read_video_timestamps(f_name)
+            else:
+                pts, _, _ = io._read_video_timestamps_from_file(f_name)
             # note: not all formats/codecs provide accurate information for computing the
             # timestamps. For the format that we use here, this information is available,
             # so we use it as a baseline
...
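
The repeated if/else in these io tests exists because the two timestamp APIs differ in shape: the pyav path's io.read_video_timestamps(f) returns (pts, fps), while the native path's io._read_video_timestamps_from_file(f) returns (video_pts, audio_pts, info) with the fps inside info. A hedged helper that would collapse those branches (the _read_timestamps name is mine, not the test suite's):

import torchvision.io as io
from torchvision import get_video_backend

_video_backend = get_video_backend()


def _read_timestamps(filename):
    # normalize both backends to a (pts, fps) pair
    if _video_backend == "pyav":
        pts, fps = io.read_video_timestamps(filename)
    else:
        pts, _audio_pts, info = io._read_video_timestamps_from_file(filename)
        fps = info.get("video_fps")
    return pts, fps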
@@ -14,6 +14,8 @@ except ImportError:
 _image_backend = 'PIL'

+_video_backend = "pyav"
+

 def set_image_backend(backend):
     """
@@ -38,6 +40,30 @@ def get_image_backend():
     return _image_backend

+def set_video_backend(backend):
+    """
+    Specifies the package used to decode videos.
+
+    Args:
+        backend (string): name of the video backend. One of {'pyav', 'video_reader'}.
+            The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
+            binding for the FFmpeg libraries.
+            The :mod:`video_reader` package includes a native C++ implementation on
+            top of the FFmpeg libraries, and a Python API of TorchScript custom operator.
+            It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
+    """
+    global _video_backend
+    if backend not in ["pyav", "video_reader"]:
+        raise ValueError(
+            "Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend
+        )
+    _video_backend = backend
+
+
+def get_video_backend():
+    return _video_backend
+
+
 def _is_tracing():
     import torch
     return torch._C._get_tracing_state()
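
The new module-level switch mirrors the existing image-backend API. A short usage sketch:

import torchvision

torchvision.set_video_backend("video_reader")  # opt into the native C++ reader
assert torchvision.get_video_backend() == "video_reader"

try:
    torchvision.set_video_backend("opencv")  # rejected: not a supported backend
except ValueError as exc:
    print(exc)

torchvision.set_video_backend("pyav")  # restore the default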
 import bisect
+from fractions import Fraction
 import math

 import torch
+from torchvision.io import (
+    _read_video_timestamps_from_file,
+    _read_video_from_file,
+)
 from torchvision.io import read_video_timestamps, read_video

 from .utils import tqdm

+def pts_convert(pts, timebase_from, timebase_to, round_func=math.floor):
+    """convert pts between different time bases
+    Args:
+        pts: presentation timestamp, float
+        timebase_from: original timebase. Fraction
+        timebase_to: new timebase. Fraction
+        round_func: rounding function.
+    """
+    new_pts = Fraction(pts, 1) * timebase_from / timebase_to
+    return round_func(new_pts)
+

 def unfold(tensor, size, step, dilation=1):
     """
     similar to tensor.unfold, but with the dilation
@@ -49,9 +66,11 @@ class VideoClips(object):
     on the resampled video
     """
-    def __init__(self, video_paths, clip_length_in_frames=16, frames_between_clips=1,
-                 frame_rate=None, _precomputed_metadata=None, num_workers=1):
+    def __init__(self, video_paths, clip_length_in_frames=16, frames_between_clips=1,
+                 frame_rate=None, _precomputed_metadata=None, num_workers=1,
+                 _backend="pyav"):
         self.video_paths = video_paths
         self.num_workers = num_workers
+        self._backend = _backend
         if _precomputed_metadata is None:
             self._compute_frame_pts()
         else:
@@ -60,23 +79,30 @@ class VideoClips(object):
     def _compute_frame_pts(self):
         self.video_pts = []
-        self.video_fps = []
+        if self._backend == "pyav":
+            self.video_fps = []
+        else:
+            self.info = []

         # strategy: use a DataLoader to parallelize read_video_timestamps
         # so need to create a dummy dataset first
         class DS(object):
-            def __init__(self, x):
+            def __init__(self, x, _backend):
                 self.x = x
+                self._backend = _backend

             def __len__(self):
                 return len(self.x)

             def __getitem__(self, idx):
-                return read_video_timestamps(self.x[idx])
+                if self._backend == "pyav":
+                    return read_video_timestamps(self.x[idx])
+                else:
+                    return _read_video_timestamps_from_file(self.x[idx])

         import torch.utils.data
         dl = torch.utils.data.DataLoader(
-            DS(self.video_paths),
+            DS(self.video_paths, self._backend),
             batch_size=16,
             num_workers=self.num_workers,
             collate_fn=lambda x: x)
@@ -84,25 +110,55 @@ class VideoClips(object):
         with tqdm(total=len(dl)) as pbar:
             for batch in dl:
                 pbar.update(1)
-                clips, fps = list(zip(*batch))
-                clips = [torch.as_tensor(c) for c in clips]
-                self.video_pts.extend(clips)
-                self.video_fps.extend(fps)
+                if self._backend == "pyav":
+                    clips, fps = list(zip(*batch))
+                    clips = [torch.as_tensor(c) for c in clips]
+                    self.video_pts.extend(clips)
+                    self.video_fps.extend(fps)
+                else:
+                    video_pts, _audio_pts, info = list(zip(*batch))
+                    video_pts = [torch.as_tensor(c) for c in video_pts]
+                    self.video_pts.extend(video_pts)
+                    self.info.extend(info)

     def _init_from_metadata(self, metadata):
+        self.video_paths = metadata["video_paths"]
         assert len(self.video_paths) == len(metadata["video_pts"])
-        assert len(self.video_paths) == len(metadata["video_fps"])
         self.video_pts = metadata["video_pts"]
-        self.video_fps = metadata["video_fps"]
+        if self._backend == "pyav":
+            assert len(self.video_paths) == len(metadata["video_fps"])
+            self.video_fps = metadata["video_fps"]
+        else:
+            assert len(self.video_paths) == len(metadata["info"])
+            self.info = metadata["info"]
+
+    @property
+    def metadata(self):
+        _metadata = {
+            "video_paths": self.video_paths,
+            "video_pts": self.video_pts,
+        }
+        if self._backend == "pyav":
+            _metadata.update({"video_fps": self.video_fps})
+        else:
+            _metadata.update({"info": self.info})
+        return _metadata

     def subset(self, indices):
         video_paths = [self.video_paths[i] for i in indices]
         video_pts = [self.video_pts[i] for i in indices]
-        video_fps = [self.video_fps[i] for i in indices]
+        if self._backend == "pyav":
+            video_fps = [self.video_fps[i] for i in indices]
+        else:
+            info = [self.info[i] for i in indices]
         metadata = {
+            "video_paths": video_paths,
             "video_pts": video_pts,
-            "video_fps": video_fps
         }
+        if self._backend == "pyav":
+            metadata.update({"video_fps": video_fps})
+        else:
+            metadata.update({"info": info})
         return type(self)(video_paths, self.num_frames, self.step, self.frame_rate,
                           _precomputed_metadata=metadata)
@@ -141,10 +197,16 @@ class VideoClips(object):
         self.frame_rate = frame_rate
         self.clips = []
         self.resampling_idxs = []
-        for video_pts, fps in zip(self.video_pts, self.video_fps):
-            clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
-            self.clips.append(clips)
-            self.resampling_idxs.append(idxs)
+        if self._backend == "pyav":
+            for video_pts, fps in zip(self.video_pts, self.video_fps):
+                clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate)
+                self.clips.append(clips)
+                self.resampling_idxs.append(idxs)
+        else:
+            for video_pts, info in zip(self.video_pts, self.info):
+                clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, info["video_fps"], frame_rate)
+                self.clips.append(clips)
+                self.resampling_idxs.append(idxs)
         clip_lengths = torch.as_tensor([len(v) for v in self.clips])
         self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
@@ -203,9 +265,40 @@ class VideoClips(object):
         video_idx, clip_idx = self.get_clip_location(idx)
         video_path = self.video_paths[video_idx]
         clip_pts = self.clips[video_idx][clip_idx]
-        start_pts = clip_pts[0].item()
-        end_pts = clip_pts[-1].item()
-        video, audio, info = read_video(video_path, start_pts, end_pts)
+        if self._backend == "pyav":
+            start_pts = clip_pts[0].item()
+            end_pts = clip_pts[-1].item()
+            video, audio, info = read_video(video_path, start_pts, end_pts)
+        else:
+            info = self.info[video_idx]
+            video_start_pts = clip_pts[0].item()
+            video_end_pts = clip_pts[-1].item()
+
+            audio_start_pts, audio_end_pts = 0, -1
+            audio_timebase = Fraction(0, 1)
+            if "audio_timebase" in info:
+                audio_timebase = info["audio_timebase"]
+                audio_start_pts = pts_convert(
+                    video_start_pts,
+                    info["video_timebase"],
+                    info["audio_timebase"],
+                    math.floor,
+                )
+                audio_end_pts = pts_convert(
+                    video_end_pts,
+                    info["video_timebase"],
+                    info["audio_timebase"],
+                    math.ceil,
+                )
+            video, audio, info = _read_video_from_file(
+                video_path,
+                video_pts_range=(video_start_pts, video_end_pts),
+                video_timebase=info["video_timebase"],
+                audio_pts_range=(audio_start_pts, audio_end_pts),
+                audio_timebase=audio_timebase,
+            )
         if self.frame_rate is not None:
             resampling_idx = self.resampling_idxs[video_idx][clip_idx]
             if isinstance(resampling_idx, torch.Tensor):
...
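
Because metadata is now exposed as a property and __init__ already accepts _precomputed_metadata, the potentially slow timestamp scan over all videos can be paid once and reused when re-slicing clips. A sketch of that round trip, again with video_list as a placeholder for real paths:

from torchvision import get_video_backend
from torchvision.datasets.video_utils import VideoClips

_backend = get_video_backend()
clips = VideoClips(video_list, 16, 4, _backend=_backend)

# reuse the cached pts/fps (or info) instead of re-scanning every file
cached = clips.metadata
shorter = VideoClips(video_list, 8, 8, _precomputed_metadata=cached,
                     _backend=_backend)

# subset() builds and forwards the per-video metadata the same way
first_two = clips.subset([0, 1])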
@@ -4,4 +4,5 @@ from ._video_opt import _read_video_from_file, _read_video_timestamps_from_file
 __all__ = [
     'write_video', 'read_video', 'read_video_timestamps',
+    '_read_video_from_file', '_read_video_timestamps_from_file',
 ]
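
Finally, the __all__ update declares the private readers among torchvision.io's exported names, so they can be used directly, as the updated tests do:

from torchvision.io import _read_video_from_file, _read_video_timestamps_from_file

# e.g. decode a whole file with the native reader ("some_video.mp4" is a placeholder)
video, audio, info = _read_video_from_file("some_video.mp4", video_pts_range=(0, -1))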