"git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "64af74fc581711a2ae595fe9435fc35399f9f48c"
Unverified commit 4774fe3a authored by Philip Meier, committed by GitHub

Split tests for transforms v2 and prototype (#7278)

parent ac1512b6
@@ -6,4 +6,17 @@ eval "$(./conda/bin/conda shell.bash hook)"
 conda activate ./env
 python -m torch.utils.collect_env
-pytest --junitxml=test-results/junit.xml -v --durations 20
+case "$(uname -s)" in
+    Darwin*)
+        # The largest macOS runner is not able to handle the regular test suite plus the transforms v2 tests at the same
+        # time due to insufficient resources. Thus, we ignore the transforms v2 tests at first and run them in a separate
+        # step afterwards.
+        GLOB='test/test_transforms_v2*'
+        pytest --junitxml=test-results/junit.xml -v --durations 20 --ignore-glob="${GLOB}"
+        eval "pytest --junitxml=test-results/junit-transforms-v2.xml -v --durations 20 ${GLOB}"
+        ;;
+    *)
+        pytest --junitxml=test-results/junit.xml -v --durations 20
+        ;;
+esac
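For illustration only, the same two-phase run can be driven through pytest's Python entry point (a sketch, not part of this commit; pytest.main and --ignore-glob are standard pytest API):

import glob
import sys

import pytest

GLOB = "test/test_transforms_v2*"
# Phase 1: everything except the transforms v2 tests.
rc1 = pytest.main(["-v", "--durations=20", f"--ignore-glob={GLOB}"])
# Phase 2: the transforms v2 tests on their own. pytest.main does not expand
# shell globs, so expand manually; this mirrors the `eval` in the script above.
rc2 = pytest.main(["-v", "--durations=20", *glob.glob(GLOB)])
sys.exit(rc1 or rc2)

Running the two phases separately also keeps the JUnit reports distinct (junit.xml vs. junit-transforms-v2.xml), so CI can still collect results from both.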
@@ -18,7 +18,8 @@ from collections import Counter, defaultdict
 import numpy as np
 import pytest
 import torch
-from datasets_utils import combinations_grid, create_image_file, create_image_folder, make_tar, make_zip
+from common_utils import combinations_grid
+from datasets_utils import create_image_file, create_image_folder, make_tar, make_zip
 from torch.nn.functional import one_hot
 from torch.testing import make_tensor as _make_tensor
 from torchvision.prototype import datasets
...
@@ -170,23 +170,6 @@ def test_all_configs(test):
     return wrapper
 
-
-def combinations_grid(**kwargs):
-    """Creates a grid of input combinations.
-
-    Each element in the returned sequence is a dictionary containing one possible combination as values.
-
-    Example:
-        >>> combinations_grid(foo=("bar", "baz"), spam=("eggs", "ham"))
-        [
-            {'foo': 'bar', 'spam': 'eggs'},
-            {'foo': 'bar', 'spam': 'ham'},
-            {'foo': 'baz', 'spam': 'eggs'},
-            {'foo': 'baz', 'spam': 'ham'}
-        ]
-    """
-    return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())]
 
 class DatasetTestCase(unittest.TestCase):
     """Abstract base class for all dataset testcases.
...
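The removed helper is not gone: it moves into common_utils, from which the other test modules in this commit now import it. Its behavior is unchanged; a quick check against the expected output from the docstring above:

from common_utils import combinations_grid

assert combinations_grid(foo=("bar", "baz"), spam=("eggs", "ham")) == [
    {"foo": "bar", "spam": "eggs"},
    {"foo": "bar", "spam": "ham"},
    {"foo": "baz", "spam": "eggs"},
    {"foo": "baz", "spam": "ham"},
]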
import pytest
import torch
from PIL import Image
from torchvision import datapoints


@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
    image = datapoints.Image(data)
    assert isinstance(image, torch.Tensor)
    assert image.ndim == 3 and image.shape[0] == 3


@pytest.mark.parametrize("data", [torch.randint(0, 10, size=(1, 32, 32)), Image.new("L", (32, 32), color=2)])
def test_mask_instance(data):
    mask = datapoints.Mask(data)
    assert isinstance(mask, torch.Tensor)
    assert mask.ndim == 3 and mask.shape[0] == 1


@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 4)), [[0, 0, 5, 5], [2, 2, 7, 7]]])
@pytest.mark.parametrize(
    "format", ["XYXY", "CXCYWH", datapoints.BoundingBoxFormat.XYXY, datapoints.BoundingBoxFormat.XYWH]
)
def test_bbox_instance(data, format):
    bboxes = datapoints.BoundingBox(data, format=format, spatial_size=(32, 32))
    assert isinstance(bboxes, torch.Tensor)
    assert bboxes.ndim == 2 and bboxes.shape[1] == 4
    if isinstance(format, str):
        format = datapoints.BoundingBoxFormat.from_str(format.upper())
    assert bboxes.format == format
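These tests pin down the datapoints API surface. A minimal sketch of that surface, assuming the prototype API exactly as exercised above (the spatial_size readback is an assumption, mirroring the constructor argument):

import torch
from torchvision import datapoints

# Datapoints are thin torch.Tensor subclasses: wrapping keeps tensor semantics.
image = datapoints.Image(torch.rand(3, 32, 32))
assert isinstance(image, torch.Tensor) and image.shape == (3, 32, 32)

# Bounding boxes additionally remember their coordinate format and the
# spatial size of the image they belong to.
bboxes = datapoints.BoundingBox(
    [[0, 0, 5, 5], [2, 2, 7, 7]],
    format=datapoints.BoundingBoxFormat.XYXY,
    spatial_size=(32, 32),
)
assert bboxes.format == datapoints.BoundingBoxFormat.XYXY
assert bboxes.spatial_size == (32, 32)  # assumed attribute, matching the kwarg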
 import pytest
 import torch
-from PIL import Image
-from torchvision import datapoints
 from torchvision.prototype import datapoints as proto_datapoints
@@ -134,30 +131,3 @@ def test_wrap_like():
     assert type(label_new) is proto_datapoints.Label
     assert label_new.data_ptr() == output.data_ptr()
     assert label_new.categories is label.categories
-
-
-@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
-def test_image_instance(data):
-    image = datapoints.Image(data)
-    assert isinstance(image, torch.Tensor)
-    assert image.ndim == 3 and image.shape[0] == 3
-
-
-@pytest.mark.parametrize("data", [torch.randint(0, 10, size=(1, 32, 32)), Image.new("L", (32, 32), color=2)])
-def test_mask_instance(data):
-    mask = datapoints.Mask(data)
-    assert isinstance(mask, torch.Tensor)
-    assert mask.ndim == 3 and mask.shape[0] == 1
-
-
-@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 4)), [[0, 0, 5, 5], [2, 2, 7, 7]]])
-@pytest.mark.parametrize(
-    "format", ["XYXY", "CXCYWH", datapoints.BoundingBoxFormat.XYXY, datapoints.BoundingBoxFormat.XYWH]
-)
-def test_bbox_instance(data, format):
-    bboxes = datapoints.BoundingBox(data, format=format, spatial_size=(32, 32))
-    assert isinstance(bboxes, torch.Tensor)
-    assert bboxes.ndim == 2 and bboxes.shape[1] == 4
-    if isinstance(format, str):
-        format = datapoints.BoundingBoxFormat.from_str(format.upper())
-    assert bboxes.format == format
@@ -12,9 +12,8 @@ import PIL.Image
 import pytest
 import torch
-import torchvision.prototype.transforms as prototype_transforms
 import torchvision.transforms.v2 as v2_transforms
-from prototype_common_utils import (
+from common_utils import (
     ArgsKwargs,
     assert_close,
     assert_equal,
@@ -22,7 +21,6 @@ from prototype_common_utils import (
     make_detection_mask,
     make_image,
     make_images,
-    make_label,
     make_segmentation_mask,
 )
 from torch import nn
@@ -1056,6 +1054,9 @@ class TestRefDetTransforms:
         size = (600, 800)
         num_objects = 22
 
+        def make_label(extra_dims, categories):
+            return torch.randint(categories, extra_dims, dtype=torch.int64)
+
         pil_image = to_image_pil(make_image(size=size, color_space="RGB"))
         target = {
             "boxes": make_bounding_box(spatial_size=size, format="XYXY", extra_dims=(num_objects,), dtype=torch.float),
@@ -1102,11 +1103,6 @@ class TestRefDetTransforms:
             ),
             (det_transforms.RandomZoomOut(), v2_transforms.RandomZoomOut(), {"with_mask": False}),
             (det_transforms.ScaleJitter((1024, 1024)), v2_transforms.ScaleJitter((1024, 1024)), {}),
-            (
-                det_transforms.FixedSizeCrop((1024, 1024), fill=0),
-                prototype_transforms.FixedSizeCrop((1024, 1024), fill=0),
-                {},
-            ),
             (
                 det_transforms.RandomShortestSize(
                     min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333
...
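For reference, the inlined make_label above replaces the helper previously imported from prototype_common_utils (dropped from this file's import list in the first hunk). A quick sanity check of what it produces, with hypothetical sizes:

import torch

def make_label(extra_dims, categories):
    return torch.randint(categories, extra_dims, dtype=torch.int64)

labels = make_label(extra_dims=(22,), categories=80)  # hypothetical: 22 objects, 80 classes
assert labels.shape == (22,) and labels.dtype == torch.int64
assert (0 <= labels).all() and (labels < 80).all()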
@@ -11,15 +11,16 @@ import pytest
 import torch
-from common_utils import cache, cpu_and_gpu, needs_cuda, set_rng_seed
-from prototype_common_utils import (
+from common_utils import (
     assert_close,
+    cache,
+    cpu_and_gpu,
     DEFAULT_SQUARE_SPATIAL_SIZE,
     make_bounding_boxes,
+    needs_cuda,
     parametrized_error_message,
+    set_rng_seed,
 )
-from prototype_transforms_dispatcher_infos import DISPATCHER_INFOS
-from prototype_transforms_kernel_infos import KERNEL_INFOS
 from torch.utils._pytree import tree_map
 from torchvision import datapoints
 from torchvision.transforms.functional import _get_perspective_coeffs
@@ -27,6 +28,8 @@ from torchvision.transforms.v2 import functional as F
 from torchvision.transforms.v2.functional._geometry import _center_crop_compute_padding
 from torchvision.transforms.v2.functional._meta import clamp_bounding_box, convert_format_bounding_box
 from torchvision.transforms.v2.utils import is_simple_tensor
+from transforms_v2_dispatcher_infos import DISPATCHER_INFOS
+from transforms_v2_kernel_infos import KERNEL_INFOS
 
 KERNEL_INFOS_MAP = {info.kernel: info for info in KERNEL_INFOS}
@@ -635,7 +638,7 @@ class TestConvertFormatBoundingBox:
 # TODO: All correctness checks below this line should be ported to be references on a `KernelInfo` in
-# `prototype_transforms_kernel_infos.py`
+# `transforms_v2_kernel_infos.py`
 
 def _compute_affine_matrix(angle_, translate_, scale_, shear_, center_):
...
@@ -4,7 +4,7 @@ import pytest
 import torch
 import torchvision.transforms.v2.utils
-from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
+from common_utils import make_bounding_box, make_detection_mask, make_image
 from torchvision import datapoints
 from torchvision.transforms.v2.functional import to_image_pil
...
@@ -2,9 +2,9 @@ import collections.abc
 import pytest
 import torchvision.transforms.v2.functional as F
-from prototype_common_utils import InfoBase, TestMark
-from prototype_transforms_kernel_infos import KERNEL_INFOS, pad_xfail_jit_fill_condition
+from common_utils import InfoBase, TestMark
 from torchvision import datapoints
+from transforms_v2_kernel_infos import KERNEL_INFOS, pad_xfail_jit_fill_condition
 
 __all__ = ["DispatcherInfo", "DISPATCHER_INFOS"]
@@ -49,7 +49,7 @@ class DispatcherInfo(InfoBase):
             if not kernel_info:
                 raise pytest.UsageError(
                     f"Can't register {kernel.__name__} for type {datapoint_type} since there is no `KernelInfo` for it. "
-                    f"Please add a `KernelInfo` for it in `prototype_transforms_kernel_infos.py`."
+                    f"Please add a `KernelInfo` for it in `transforms_v2_kernel_infos.py`."
                 )
             kernel_infos[datapoint_type] = kernel_info
         self.kernel_infos = kernel_infos
...
@@ -9,9 +9,9 @@ import pytest
 import torch.testing
 import torchvision.ops
 import torchvision.transforms.v2.functional as F
-from datasets_utils import combinations_grid
-from prototype_common_utils import (
+from common_utils import (
     ArgsKwargs,
+    combinations_grid,
     get_num_channels,
     ImageLoader,
     InfoBase,
...