Unverified Commit 79daca10 authored by Francisco Massa's avatar Francisco Massa Committed by GitHub
Browse files

Make video transforms private (#1429)

parent e48b9584
from __future__ import division from __future__ import division
import torch import torch
import torchvision.transforms as transforms import torchvision.transforms._transforms_video as transforms
from torchvision.transforms import Compose
import unittest import unittest
import random import random
import numpy as np import numpy as np
...@@ -20,7 +21,7 @@ class TestVideoTransforms(unittest.TestCase): ...@@ -20,7 +21,7 @@ class TestVideoTransforms(unittest.TestCase):
oheight = random.randint(5, (height - 2) / 2) * 2 oheight = random.randint(5, (height - 2) / 2) * 2
owidth = random.randint(5, (width - 2) / 2) * 2 owidth = random.randint(5, (width - 2) / 2) * 2
clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8) clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8)
result = transforms.Compose([ result = Compose([
transforms.ToTensorVideo(), transforms.ToTensorVideo(),
transforms.RandomCropVideo((oheight, owidth)), transforms.RandomCropVideo((oheight, owidth)),
])(clip) ])(clip)
...@@ -36,7 +37,7 @@ class TestVideoTransforms(unittest.TestCase): ...@@ -36,7 +37,7 @@ class TestVideoTransforms(unittest.TestCase):
oheight = random.randint(5, (height - 2) / 2) * 2 oheight = random.randint(5, (height - 2) / 2) * 2
owidth = random.randint(5, (width - 2) / 2) * 2 owidth = random.randint(5, (width - 2) / 2) * 2
clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8) clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8)
result = transforms.Compose([ result = Compose([
transforms.ToTensorVideo(), transforms.ToTensorVideo(),
transforms.RandomResizedCropVideo((oheight, owidth)), transforms.RandomResizedCropVideo((oheight, owidth)),
])(clip) ])(clip)
...@@ -57,7 +58,7 @@ class TestVideoTransforms(unittest.TestCase): ...@@ -57,7 +58,7 @@ class TestVideoTransforms(unittest.TestCase):
ow1 = (width - owidth) // 2 ow1 = (width - owidth) // 2
clipNarrow = clip[:, oh1:oh1 + oheight, ow1:ow1 + owidth, :] clipNarrow = clip[:, oh1:oh1 + oheight, ow1:ow1 + owidth, :]
clipNarrow.fill_(0) clipNarrow.fill_(0)
result = transforms.Compose([ result = Compose([
transforms.ToTensorVideo(), transforms.ToTensorVideo(),
transforms.CenterCropVideo((oheight, owidth)), transforms.CenterCropVideo((oheight, owidth)),
])(clip) ])(clip)
...@@ -68,7 +69,7 @@ class TestVideoTransforms(unittest.TestCase): ...@@ -68,7 +69,7 @@ class TestVideoTransforms(unittest.TestCase):
oheight += 1 oheight += 1
owidth += 1 owidth += 1
result = transforms.Compose([ result = Compose([
transforms.ToTensorVideo(), transforms.ToTensorVideo(),
transforms.CenterCropVideo((oheight, owidth)), transforms.CenterCropVideo((oheight, owidth)),
])(clip) ])(clip)
...@@ -80,7 +81,7 @@ class TestVideoTransforms(unittest.TestCase): ...@@ -80,7 +81,7 @@ class TestVideoTransforms(unittest.TestCase):
oheight += 1 oheight += 1
owidth += 1 owidth += 1
result = transforms.Compose([ result = Compose([
transforms.ToTensorVideo(), transforms.ToTensorVideo(),
transforms.CenterCropVideo((oheight, owidth)), transforms.CenterCropVideo((oheight, owidth)),
])(clip) ])(clip)
......
from .transforms import * from .transforms import *
from .transforms_video import *
...@@ -8,7 +8,7 @@ from torchvision.transforms import ( ...@@ -8,7 +8,7 @@ from torchvision.transforms import (
RandomResizedCrop, RandomResizedCrop,
) )
from . import functional_video as F from . import _functional_video as F
__all__ = [ __all__ = [
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment