"git@developer.sourcefind.cn:OpenDAS/torchaudio.git" did not exist on "a8ce4a8775f098d57943a29b5389bda16adffc25"
Unverified Commit 54160313 authored by vfdev's avatar vfdev Committed by GitHub
Browse files

[jit] Updated tests checking save and load transforms (#6248)

* Updated tests to save and load transforms

* Fixed code formatting issue
parent 8a45147f
......@@ -93,6 +93,13 @@ def _test_op(func, method, device, channels=3, fn_kwargs=None, meth_kwargs=None,
_test_class_op(method, device, channels, meth_kwargs, test_exact_match=test_exact_match, **match_kwargs)
def _test_fn_save_load(fn, tmpdir):
scripted_fn = torch.jit.script(fn)
p = os.path.join(tmpdir, f"t_op_list_{fn.__name__ if hasattr(fn, '__name__') else fn.__class__.__name__}.pt")
scripted_fn.save(p)
_ = torch.jit.load(p)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
"func,method,fn_kwargs,match_kwargs",
......@@ -204,9 +211,7 @@ def test_pad(m, mul, device):
_test_functional_op(F.pad, fn_kwargs={"padding": mul * 2, "fill": fill, "padding_mode": m}, device=device)
# Test functional.pad and transforms.Pad with padding as [int, ]
fn_kwargs = meth_kwargs = {
"padding": [
mul * 2,
],
"padding": [mul * 2],
"fill": fill,
"padding_mode": m,
}
......@@ -253,33 +258,30 @@ def test_crop(device):
[
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"},
],
)
@pytest.mark.parametrize(
"size",
[
5,
[
5,
],
[6, 6],
],
)
def test_crop_pad(size, padding_config, device):
@pytest.mark.parametrize("pad_if_needed", [True, False])
@pytest.mark.parametrize("padding", [[5], [5, 4], [1, 2, 3, 4]])
@pytest.mark.parametrize("size", [5, [5], [6, 6]])
def test_random_crop(size, padding, pad_if_needed, padding_config, device):
config = dict(padding_config)
config["size"] = size
config["padding"] = padding
config["pad_if_needed"] = pad_if_needed
_test_class_op(T.RandomCrop, device, meth_kwargs=config)
def test_random_crop_save_load(tmpdir):
    """RandomCrop (with padding and pad_if_needed) round-trips through TorchScript save/load."""
    _test_fn_save_load(T.RandomCrop(32, [4], pad_if_needed=True), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_center_crop(device, tmpdir):
fn_kwargs = {"output_size": (4, 5)}
meth_kwargs = {
"size": (4, 5),
}
meth_kwargs = {"size": (4, 5)}
_test_op(F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs)
fn_kwargs = {"output_size": (5,)}
meth_kwargs = {"size": (5,)}
......@@ -291,11 +293,7 @@ def test_center_crop(device, tmpdir):
scripted_fn(tensor)
# Test torchscript of transforms.CenterCrop with size as [int, ]
f = T.CenterCrop(
size=[
5,
]
)
f = T.CenterCrop(size=[5])
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
......@@ -304,7 +302,10 @@ def test_center_crop(device, tmpdir):
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
scripted_fn.save(os.path.join(tmpdir, "t_center_crop.pt"))
def test_center_crop_save_load(tmpdir):
    """CenterCrop with a single-element size list round-trips through TorchScript save/load."""
    _test_fn_save_load(T.CenterCrop(size=[5]), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
......@@ -317,17 +318,7 @@ def test_center_crop(device, tmpdir):
(F.ten_crop, T.TenCrop, 10),
],
)
@pytest.mark.parametrize(
"size",
[
(5,),
[
5,
],
(4, 5),
[4, 5],
],
)
@pytest.mark.parametrize("size", [(5,), [5], (4, 5), [4, 5]])
def test_x_crop(fn, method, out_length, size, device):
meth_kwargs = fn_kwargs = {"size": size}
scripted_fn = torch.jit.script(fn)
......@@ -366,14 +357,9 @@ def test_x_crop(fn, method, out_length, size, device):
@pytest.mark.parametrize("method", ["FiveCrop", "TenCrop"])
def test_x_crop_save(method, tmpdir):
fn = getattr(T, method)(
size=[
5,
]
)
scripted_fn = torch.jit.script(fn)
scripted_fn.save(os.path.join(tmpdir, f"t_op_list_{method}.pt"))
def test_x_crop_save_load(method, tmpdir):
    """The named crop transform (FiveCrop/TenCrop) round-trips through TorchScript save/load."""
    transform = getattr(T, method)(size=[5])
    _test_fn_save_load(transform, tmpdir)
class TestResize:
......@@ -391,15 +377,7 @@ class TestResize:
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64])
@pytest.mark.parametrize(
"size",
[
[32],
[32, 32],
(32, 32),
[34, 35],
],
)
@pytest.mark.parametrize("size", [[32], [32, 32], (32, 32), [34, 35]])
@pytest.mark.parametrize("max_size", [None, 35, 1000])
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC, NEAREST])
def test_resize_scripted(self, dt, size, max_size, interpolation, device):
......@@ -417,25 +395,14 @@ class TestResize:
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_resize_save(self, tmpdir):
transform = T.Resize(size=[32])
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_resize.pt"))
def test_resize_save_load(self, tmpdir):
    """Resize round-trips through TorchScript save/load."""
    _test_fn_save_load(T.Resize(size=[32]), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("scale", [(0.7, 1.2), [0.7, 1.2]])
@pytest.mark.parametrize("ratio", [(0.75, 1.333), [0.75, 1.333]])
@pytest.mark.parametrize(
"size",
[
(32,),
[44],
[32],
[32, 32],
(32, 32),
[44, 55],
],
)
@pytest.mark.parametrize("size", [(32,), [44], [32], [32, 32], (32, 32), [44, 55]])
@pytest.mark.parametrize("interpolation", [NEAREST, BILINEAR, BICUBIC])
@pytest.mark.parametrize("antialias", [None, True, False])
def test_resized_crop(self, scale, ratio, size, interpolation, antialias, device):
......@@ -452,14 +419,9 @@ class TestResize:
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_resized_crop_save(self, tmpdir):
transform = T.RandomResizedCrop(
size=[
32,
]
)
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_resized_crop.pt"))
def test_resized_crop_save_load(self, tmpdir):
    """RandomResizedCrop round-trips through TorchScript save/load."""
    _test_fn_save_load(T.RandomResizedCrop(size=[32]), tmpdir)
def _test_random_affine_helper(device, **kwargs):
......@@ -472,11 +434,9 @@ def _test_random_affine_helper(device, **kwargs):
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_random_affine(device, tmpdir):
transform = T.RandomAffine(degrees=45.0)
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_random_affine.pt"))
def test_random_affine_save_load(tmpdir):
    """RandomAffine round-trips through TorchScript save/load."""
    _test_fn_save_load(T.RandomAffine(degrees=45.0), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
......@@ -509,19 +469,7 @@ def test_random_affine_degrees(device, interpolation, degrees):
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("interpolation", [NEAREST, BILINEAR])
@pytest.mark.parametrize(
"fill",
[
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_random_affine_fill(device, interpolation, fill):
_test_random_affine_helper(device, degrees=0.0, interpolation=interpolation, fill=fill)
......@@ -531,19 +479,7 @@ def test_random_affine_fill(device, interpolation, fill):
@pytest.mark.parametrize("expand", [True, False])
@pytest.mark.parametrize("degrees", [45, 35.0, (-45, 45), [-90.0, 90.0]])
@pytest.mark.parametrize("interpolation", [NEAREST, BILINEAR])
@pytest.mark.parametrize(
"fill",
[
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_random_rotate(device, center, expand, degrees, interpolation, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -555,28 +491,15 @@ def test_random_rotate(device, center, expand, degrees, interpolation, fill):
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_random_rotate_save(tmpdir):
transform = T.RandomRotation(degrees=45.0)
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_random_rotate.pt"))
def test_random_rotate_save_load(tmpdir):
    """RandomRotation round-trips through TorchScript save/load."""
    _test_fn_save_load(T.RandomRotation(degrees=45.0), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("distortion_scale", np.linspace(0.1, 1.0, num=20))
@pytest.mark.parametrize("interpolation", [NEAREST, BILINEAR])
@pytest.mark.parametrize(
"fill",
[
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_random_perspective(device, distortion_scale, interpolation, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -588,10 +511,9 @@ def test_random_perspective(device, distortion_scale, interpolation, fill):
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_random_perspective_save(tmpdir):
transform = T.RandomPerspective()
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_perspective.pt"))
def test_random_perspective_save_load(tmpdir):
    """RandomPerspective round-trips through TorchScript save/load."""
    _test_fn_save_load(T.RandomPerspective(), tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
......@@ -630,28 +552,14 @@ def test_convert_image_dtype(device, in_dtype, out_dtype):
_test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors)
def test_convert_image_dtype_save(tmpdir):
def test_convert_image_dtype_save_load(tmpdir):
    """ConvertImageDtype round-trips through TorchScript save/load.

    The duplicated manual script-and-save lines (left over from the diff
    merge of the old test body) are removed; `_test_fn_save_load` already
    performs the script/save/load sequence.
    """
    fn = T.ConvertImageDtype(dtype=torch.uint8)
    _test_fn_save_load(fn, tmpdir)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("policy", [policy for policy in T.AutoAugmentPolicy])
@pytest.mark.parametrize(
"fill",
[
None,
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_autoaugment(device, policy, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -666,20 +574,7 @@ def test_autoaugment(device, policy, fill):
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("num_ops", [1, 2, 3])
@pytest.mark.parametrize("magnitude", [7, 9, 11])
@pytest.mark.parametrize(
"fill",
[
None,
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_randaugment(device, num_ops, magnitude, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -692,20 +587,7 @@ def test_randaugment(device, num_ops, magnitude, fill):
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
"fill",
[
None,
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_trivialaugmentwide(device, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -718,20 +600,7 @@ def test_trivialaugmentwide(device, fill):
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
"fill",
[
None,
85,
(10, -10, 10),
0.7,
[0.0, 0.0, 0.0],
[
1,
],
1,
],
)
@pytest.mark.parametrize("fill", [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1], 1])
def test_augmix(device, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
......@@ -749,10 +618,9 @@ def test_augmix(device, fill):
@pytest.mark.parametrize("augmentation", [T.AutoAugment, T.RandAugment, T.TrivialAugmentWide, T.AugMix])
def test_autoaugment_save(augmentation, tmpdir):
transform = augmentation()
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_autoaugment.pt"))
def test_autoaugment_save_load(augmentation, tmpdir):
    """Each auto-augmentation transform round-trips through TorchScript save/load."""
    _test_fn_save_load(augmentation(), tmpdir)
@pytest.mark.parametrize("interpolation", [F.InterpolationMode.NEAREST, F.InterpolationMode.BILINEAR])
......@@ -812,10 +680,9 @@ def test_random_erasing(device, config):
_test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)
def test_random_erasing_save(tmpdir):
def test_random_erasing_save_load(tmpdir):
    """RandomErasing round-trips through TorchScript save/load.

    The duplicated manual script-and-save lines (left over from the diff
    merge of the old test body) are removed; `_test_fn_save_load` already
    performs the script/save/load sequence.
    """
    fn = T.RandomErasing(value=0.2)
    _test_fn_save_load(fn, tmpdir)
def test_random_erasing_with_invalid_data():
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment