from copy import deepcopy

import pytest
import torch
from common_utils import assert_equal
from PIL import Image

from torchvision import datapoints


@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
    image = datapoints.Image(data)
    assert isinstance(image, torch.Tensor)
    assert image.ndim == 3 and image.shape[0] == 3


@pytest.mark.parametrize("data", [torch.randint(0, 10, size=(1, 32, 32)), Image.new("L", (32, 32), color=2)])
def test_mask_instance(data):
    mask = datapoints.Mask(data)
    assert isinstance(mask, torch.Tensor)
    assert mask.ndim == 3 and mask.shape[0] == 1


@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 4)), [[0, 0, 5, 5], [2, 2, 7, 7]], [1, 2, 3, 4]])
@pytest.mark.parametrize(
    "format", ["XYXY", "CXCYWH", datapoints.BoundingBoxFormat.XYXY, datapoints.BoundingBoxFormat.XYWH]
)
def test_bbox_instance(data, format):
    bboxes = datapoints.BoundingBoxes(data, format=format, canvas_size=(32, 32))
    assert isinstance(bboxes, torch.Tensor)
    assert bboxes.ndim == 2 and bboxes.shape[1] == 4
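    # a string format is normalized to the BoundingBoxFormat enum on construction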
    if isinstance(format, str):
        format = datapoints.BoundingBoxFormat[format.upper()]
    assert bboxes.format == format


def test_bbox_dim_error():
    data_3d = [[[1, 2, 3, 4]]]
    with pytest.raises(ValueError, match="Expected a 1D or 2D tensor, got 3D"):
        datapoints.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32))


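# requires_grad=None means: inherit requires_grad from the input data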
@pytest.mark.parametrize(
    ("data", "input_requires_grad", "expected_requires_grad"),
    [
        ([[[0.0, 1.0], [0.0, 1.0]]], None, False),
        ([[[0.0, 1.0], [0.0, 1.0]]], False, False),
        ([[[0.0, 1.0], [0.0, 1.0]]], True, True),
        (torch.rand(3, 16, 16, requires_grad=False), None, False),
        (torch.rand(3, 16, 16, requires_grad=False), False, False),
        (torch.rand(3, 16, 16, requires_grad=False), True, True),
        (torch.rand(3, 16, 16, requires_grad=True), None, True),
        (torch.rand(3, 16, 16, requires_grad=True), False, False),
        (torch.rand(3, 16, 16, requires_grad=True), True, True),
    ],
)
def test_new_requires_grad(data, input_requires_grad, expected_requires_grad):
    datapoint = datapoints.Image(data, requires_grad=input_requires_grad)
    assert datapoint.requires_grad is expected_requires_grad


def test_isinstance():
    assert isinstance(datapoints.Image(torch.rand(3, 16, 16)), torch.Tensor)


def test_wrapping_no_copy():
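    # wrapping an existing tensor should be zero-copy: an identical data_ptr means the Image shares the tensor's storage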
    tensor = torch.rand(3, 16, 16)
    image = datapoints.Image(tensor)

    assert image.data_ptr() == tensor.data_ptr()


def test_to_wrapping():
    image = datapoints.Image(torch.rand(3, 16, 16))

    image_to = image.to(torch.float64)

    assert type(image_to) is datapoints.Image
    assert image_to.dtype is torch.float64


def test_to_datapoint_reference():
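    # using an Image as the reference argument of Tensor.to should only transfer its dtype/device; the plain tensor must not get wrapped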
    tensor = torch.rand((3, 16, 16), dtype=torch.float64)
    image = datapoints.Image(tensor)

    tensor_to = tensor.to(image)

    assert type(tensor_to) is torch.Tensor
    assert tensor_to.dtype is torch.float64


def test_clone_wrapping():
    image = datapoints.Image(torch.rand(3, 16, 16))

    image_clone = image.clone()

    assert type(image_clone) is datapoints.Image
    assert image_clone.data_ptr() != image.data_ptr()


def test_requires_grad__wrapping():
    image = datapoints.Image(torch.rand(3, 16, 16))

    assert not image.requires_grad

    image_requires_grad = image.requires_grad_(True)

    assert type(image_requires_grad) is datapoints.Image
    assert image.requires_grad
    assert image_requires_grad.requires_grad


def test_detach_wrapping():
    image = datapoints.Image(torch.rand(3, 16, 16), requires_grad=True)

    image_detached = image.detach()

    assert type(image_detached) is datapoints.Image


def test_no_wrapping_exceptions_with_metadata():
    # Sanity checks for the ops in _NO_WRAPPING_EXCEPTIONS and datapoints with metadata
    format, canvas_size = datapoints.BoundingBoxFormat.XYXY, (32, 32)
    bbox = datapoints.BoundingBoxes([[0, 0, 5, 5], [2, 2, 7, 7]], format=format, canvas_size=canvas_size)

    bbox = bbox.clone()
    assert (bbox.format, bbox.canvas_size) == (format, canvas_size)

    bbox = bbox.to(torch.float64)
    assert (bbox.format, bbox.canvas_size) == (format, canvas_size)

    bbox = bbox.detach()
    assert (bbox.format, bbox.canvas_size) == (format, canvas_size)

    assert not bbox.requires_grad
    bbox.requires_grad_(True)
    assert (bbox.format, bbox.canvas_size) == (format, canvas_size)
    assert bbox.requires_grad


def test_other_op_no_wrapping():
    image = datapoints.Image(torch.rand(3, 16, 16))

    # any operation besides the ones listed in `Datapoint._NO_WRAPPING_EXCEPTIONS` will do here
    output = image * 2

    assert type(output) is torch.Tensor


@pytest.mark.parametrize(
    "op",
    [
        lambda t: t.numpy(),
        lambda t: t.tolist(),
        lambda t: t.max(dim=-1),
    ],
)
def test_no_tensor_output_op_no_wrapping(op):
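    # ops whose output is not a tensor (NumPy array, list, named tuple of values/indices) cannot keep the Image type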
    image = datapoints.Image(torch.rand(3, 16, 16))

    output = op(image)

    assert type(output) is not datapoints.Image


def test_inplace_op_no_wrapping():
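    # in-place ops also unwrap: the returned handle is a plain tensor, while the original object keeps its Image type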
    image = datapoints.Image(torch.rand(3, 16, 16))

    output = image.add_(0)

    assert type(output) is torch.Tensor
    assert type(image) is datapoints.Image


def test_wrap_like():
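    # wrap_like re-attaches the Image type to the result of a plain-tensor op without copying its data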
    image = datapoints.Image(torch.rand(3, 16, 16))

    # any operation besides the ones listed in `Datapoint._NO_WRAPPING_EXCEPTIONS` will do here
    output = image * 2

    image_new = datapoints.Image.wrap_like(image, output)

    assert type(image_new) is datapoints.Image
    assert image_new.data_ptr() == output.data_ptr()


@pytest.mark.parametrize(
    "datapoint",
    [
        datapoints.Image(torch.rand(3, 16, 16)),
        datapoints.Video(torch.rand(2, 3, 16, 16)),
        datapoints.BoundingBoxes([0.0, 1.0, 2.0, 3.0], format=datapoints.BoundingBoxFormat.XYXY, canvas_size=(10, 10)),
        datapoints.Mask(torch.randint(0, 256, (16, 16), dtype=torch.uint8)),
    ],
)
@pytest.mark.parametrize("requires_grad", [False, True])
def test_deepcopy(datapoint, requires_grad):
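    # only floating point tensors can require gradients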
    if requires_grad and not datapoint.dtype.is_floating_point:
        return

    datapoint.requires_grad_(requires_grad)

    datapoint_deepcopied = deepcopy(datapoint)

    assert datapoint_deepcopied is not datapoint
    assert datapoint_deepcopied.data_ptr() != datapoint.data_ptr()
    assert_equal(datapoint_deepcopied, datapoint)

    assert type(datapoint_deepcopied) is type(datapoint)
    assert datapoint_deepcopied.requires_grad is requires_grad