test_struct_utils.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
from pytorch3d.structures import utils as struct_utils

from .common_testing import TestCaseMixin


class TestStructUtils(TestCaseMixin, unittest.TestCase):
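    """Tests for the list <-> padded <-> packed conversion utilities in
    pytorch3d.structures.utils."""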
    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(43)

    def _check_list_to_padded_slices(self, x, x_padded, ndim):
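        """Check that each tensor in the list x appears unchanged in the
        corresponding leading slice of x_padded; empty 1D inputs are compared
        against an empty tensor with ndim dimensions."""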
        N = len(x)
        for i in range(N):
            slices = [i]
            for dim in range(ndim):
                if x[i].nelement() == 0 and x[i].ndim == 1:
                    slice_ = slice(0, 0, 1)
                else:
                    slice_ = slice(0, x[i].shape[dim], 1)
                slices.append(slice_)
            if x[i].nelement() == 0 and x[i].ndim == 1:
                x_correct = x[i].new_zeros(*[[0] * ndim])
            else:
                x_correct = x[i]
            self.assertClose(x_padded[tuple(slices)], x_correct)

    def test_list_to_padded(self):
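        """Check list_to_padded for 1-4 dim inputs, including empty tensors,
        explicit and default pad_size, equisized inputs and invalid pad_size."""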
        device = torch.device("cuda:0")
        N = 5
        K = 20
        for ndim in [1, 2, 3, 4]:
            x = []
            for _ in range(N):
                dims = torch.randint(K, size=(ndim,)).tolist()
                x.append(torch.rand(dims, device=device))

            # set 0th element to an empty 1D tensor
            x[0] = torch.tensor([], dtype=x[0].dtype, device=device)

            # set 1st element to an empty tensor with correct number of dims
            x[1] = x[1].new_zeros(*[[0] * ndim])

            pad_size = [K] * ndim
            x_padded = struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

            for dim in range(ndim):
                self.assertEqual(x_padded.shape[dim + 1], K)

            self._check_list_to_padded_slices(x, x_padded, ndim)

            # check with no pad_size (each dim defaults to the max size over the list)
            x_padded = struct_utils.list_to_padded(x, pad_value=0.0, equisized=False)
            max_sizes = (
                max(
                    (0 if (y.nelement() == 0 and y.ndim == 1) else y.shape[dim])
                    for y in x
                )
                for dim in range(ndim)
            )
            for dim, max_size in enumerate(max_sizes):
                self.assertEqual(x_padded.shape[dim + 1], max_size)

            self._check_list_to_padded_slices(x, x_padded, ndim)

            # check for equisized
            x = [torch.rand((K, *([10] * (ndim - 1))), device=device) for _ in range(N)]
            x_padded = struct_utils.list_to_padded(x, equisized=True)
            self.assertClose(x_padded, torch.stack(x, 0))

        # catch ValueError for invalid dimensions
        pad_size = [K] * (ndim + 1)
        with self.assertRaisesRegex(ValueError, "Pad size must"):
            struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

        # invalid input tensor dimensions
        x = []
        ndim = 3
        for _ in range(N):
            dims = torch.randint(K, size=(ndim,)).tolist()
            x.append(torch.rand(dims, device=device))
        pad_size = [K] * 2
        with self.assertRaisesRegex(ValueError, "Pad size must"):
            x_padded = struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

    def test_padded_to_list(self):
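        """Check padded_to_list with no split_size, with per-dim split sizes
        and with per-element int split sizes."""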
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2

        for ndim in (2, 3, 4):

            dims = [K] * ndim
            x = torch.rand([N] + dims, device=device)

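            # no split_size: each returned element is the full padded slice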
            x_list = struct_utils.padded_to_list(x)
            for i in range(N):
                self.assertClose(x_list[i], x[i])

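            # split size given per element and per dim (one tensor of length
            # ndim for each of the N elements)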
            split_size = torch.randint(1, K, size=(N, ndim)).unbind(0)
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                slices = [i]
                for dim in range(ndim):
                    slices.append(slice(0, split_size[i][dim], 1))
                self.assertClose(x_list[i], x[tuple(slices)])

            # split size is a list of ints
            split_size = [int(z) for z in torch.randint(1, K, size=(N,)).unbind(0)]
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                self.assertClose(x_list[i], x[i][: split_size[i]])

    def test_padded_to_packed(self):
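        """Check padded_to_packed with no extra arguments, with pad_value,
        with split_size, and the error cases for invalid arguments."""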
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2
        dims = [K] * ndim
        x = torch.rand([N] + dims, device=device)

        # Case 1: no split_size or pad_value provided
        # Check output is just the flattened input.
        x_packed = struct_utils.padded_to_packed(x)
        self.assertTrue(x_packed.shape == (x.shape[0] * x.shape[1], x.shape[2]))
        self.assertClose(x_packed, x.reshape(-1, K))

        # Case 2: pad_value is provided.
        # Check each section of the packed tensor matches the
        # corresponding unpadded elements of the padded tensor.
        # Check that only rows where all the values are padded
        # are removed in the conversion to packed.
        pad_value = -1
        x_list = []
        split_size = []
        for _ in range(N):
            dim = torch.randint(K, size=(1,)).item()
            # Add some random values in the input which are the same as the pad_value.
            # These should not be filtered out.
            x_list.append(
                torch.randint(low=pad_value, high=10, size=(dim, K), device=device)
            )
            split_size.append(dim)
        x_padded = struct_utils.list_to_padded(x_list, pad_value=pad_value)
        x_packed = struct_utils.padded_to_packed(x_padded, pad_value=pad_value)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 3: split_size is provided.
        # Check each section of the packed tensor matches the corresponding
        # unpadded elements.
        x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 4: split_size of the wrong shape is provided.
        # Raise an error.
        split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0)
        with self.assertRaisesRegex(ValueError, "1-dimensional"):
            x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)

        split_size = torch.randint(1, K, size=(2 * N,)).view(N * 2).tolist()
        with self.assertRaisesRegex(
            ValueError, "same length as inputs first dimension"
        ):
            x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)

        # Case 5: both pad_value and split_size are provided.
        # Raise an error.
        with self.assertRaisesRegex(ValueError, "Only one of"):
            x_packed = struct_utils.padded_to_packed(
                x_padded, split_size=split_size, pad_value=-1
            )

        # Case 6: Input has more than 3 dims.
        # Raise an error.
        x = torch.rand((N, K, K, K, K), device=device)
        split_size = torch.randint(1, K, size=(N,)).tolist()
        with self.assertRaisesRegex(ValueError, "Supports only"):
            struct_utils.padded_to_packed(x, split_size=split_size)

    def test_list_to_packed(self):
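        """Check that list_to_packed concatenates the list and returns
        consistent bookkeeping tensors."""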
        device = torch.device("cuda:0")
        N = 5
        K = 20
        x, x_dims = [], []
        dim2 = torch.randint(K, size=(1,)).item()
        for _ in range(N):
            dim1 = torch.randint(K, size=(1,)).item()
            x_dims.append(dim1)
            x.append(torch.rand([dim1, dim2], device=device))

        out = struct_utils.list_to_packed(x)
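        # list_to_packed returns the packed tensor plus bookkeeping tensors:
        # the number of items per list element, the first packed index of
        # each element, and a packed-to-list index for every packed row.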
        x_packed = out[0]
        num_items = out[1]
        item_packed_first_idx = out[2]
        item_packed_to_list_idx = out[3]

        cur = 0
        for i in range(N):
            self.assertTrue(num_items[i] == x_dims[i])
            self.assertTrue(item_packed_first_idx[i] == cur)
            self.assertTrue(item_packed_to_list_idx[cur : cur + x_dims[i]].eq(i).all())
            self.assertClose(x_packed[cur : cur + x_dims[i]], x[i])
            cur += x_dims[i]