"test/srt/vscode:/vscode.git/clone" did not exist on "7514b9f8d3660417c085538076cf5162f32ce2fb"
test_packed_to_padded.py 10.2 KB
Newer Older
1
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from common_testing import get_random_cuda_device, TestCaseMixin
from pytorch3d.ops import packed_to_padded, padded_to_packed
from pytorch3d.structures.meshes import Meshes


class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(1)

    @staticmethod
    def init_meshes(
        num_meshes: int = 10,
        num_verts: int = 1000,
        num_faces: int = 3000,
        device: str = "cpu",
    ):
        device = torch.device(device)
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)

        return meshes

    @staticmethod
    def packed_to_padded_python(inputs, first_idxs, max_size, device):
        """
        PyTorch implementation of packed_to_padded function.
        """
        num_meshes = first_idxs.size(0)
        D = inputs.shape[1] if inputs.dim() == 2 else 0
        if D == 0:
            inputs_padded = torch.zeros((num_meshes, max_size), device=device)
        else:
            inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)
        for m in range(num_meshes):
            s = first_idxs[m]
            if m == num_meshes - 1:
                f = inputs.shape[0]
            else:
                f = first_idxs[m + 1]
            inputs_padded[m, : f - s] = inputs[s:f]

        return inputs_padded

    @staticmethod
    def padded_to_packed_python(inputs, first_idxs, num_inputs, device):
        """
        PyTorch implementation of padded_to_packed function.
        """
        num_meshes = inputs.size(0)
        D = inputs.shape[2] if inputs.dim() == 3 else 0
        if D == 0:
            inputs_packed = torch.zeros((num_inputs,), device=device)
        else:
            inputs_packed = torch.zeros((num_inputs, D), device=device)
        for m in range(num_meshes):
            s = first_idxs[m]
            if m == num_meshes - 1:
                f = num_inputs
            else:
                f = first_idxs[m + 1]
            inputs_packed[s:f] = inputs[m, : f - s]

        return inputs_packed

    def _test_packed_to_padded_helper(self, D, device):
        """
        Check that the results from packed_to_padded and the PyTorch
        implementation are the same.
        """
        meshes = self.init_meshes(16, 100, 300, device=device)
        faces = meshes.faces_packed()
        mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
        max_faces = meshes.num_faces_per_mesh().max().item()

        if D == 0:
            values = torch.rand((faces.shape[0],), device=device, requires_grad=True)
        else:
            values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)
        values_torch = values.detach().clone()
        values_torch.requires_grad = True
        values_padded = packed_to_padded(
            values, mesh_to_faces_packed_first_idx, max_faces
        )
        values_padded_torch = TestPackedToPadded.packed_to_padded_python(
            values_torch, mesh_to_faces_packed_first_idx, max_faces, device
        )
        # check forward
        self.assertClose(values_padded, values_padded_torch)

        # check backward
        if D == 0:
            grad_inputs = torch.rand((len(meshes), max_faces), device=device)
        else:
            grad_inputs = torch.rand((len(meshes), max_faces, D), device=device)
        values_padded.backward(grad_inputs)
        grad_outputs = values.grad
        values_padded_torch.backward(grad_inputs)
        grad_outputs_torch1 = values_torch.grad
        grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python(
            grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device
        )
        self.assertClose(grad_outputs, grad_outputs_torch1)
        self.assertClose(grad_outputs, grad_outputs_torch2)

    def test_packed_to_padded_flat_cpu(self):
        self._test_packed_to_padded_helper(0, "cpu")

    def test_packed_to_padded_D1_cpu(self):
        self._test_packed_to_padded_helper(1, "cpu")

    def test_packed_to_padded_D16_cpu(self):
        self._test_packed_to_padded_helper(16, "cpu")

    def test_packed_to_padded_flat_cuda(self):
        device = get_random_cuda_device()
        self._test_packed_to_padded_helper(0, device)

    def test_packed_to_padded_D1_cuda(self):
        device = get_random_cuda_device()
        self._test_packed_to_padded_helper(1, device)

    def test_packed_to_padded_D16_cuda(self):
        device = get_random_cuda_device()
        self._test_packed_to_padded_helper(16, device)

    def _test_padded_to_packed_helper(self, D, device):
        """
        Check that the results from padded_to_packed and the PyTorch
        implementation are the same.
        """
        meshes = self.init_meshes(16, 100, 300, device=device)
        mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
        num_faces_per_mesh = meshes.num_faces_per_mesh()
        max_faces = num_faces_per_mesh.max().item()
        if D == 0:
            values = torch.rand((len(meshes), max_faces), device=device)
        else:
            values = torch.rand((len(meshes), max_faces, D), device=device)
        for i, num in enumerate(num_faces_per_mesh):
            values[i, num:] = 0
        values.requires_grad = True
        values_torch = values.detach().clone()
        values_torch.requires_grad = True
        values_packed = padded_to_packed(
            values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item()
        )
        values_packed_torch = TestPackedToPadded.padded_to_packed_python(
            values_torch,
            mesh_to_faces_packed_first_idx,
            num_faces_per_mesh.sum().item(),
            device,
        )
        # check forward
        self.assertClose(values_packed, values_packed_torch)

        # check backward
        if D == 0:
            grad_inputs = torch.rand((num_faces_per_mesh.sum().item(),), device=device)
        else:
            grad_inputs = torch.rand(
                (num_faces_per_mesh.sum().item(), D), device=device
            )
        values_packed.backward(grad_inputs)
        grad_outputs = values.grad
        values_packed_torch.backward(grad_inputs)
        grad_outputs_torch1 = values_torch.grad
        grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python(
            grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device
        )
        self.assertClose(grad_outputs, grad_outputs_torch1)
        self.assertClose(grad_outputs, grad_outputs_torch2)

    def test_padded_to_packed_flat_cpu(self):
        self._test_padded_to_packed_helper(0, "cpu")

    def test_padded_to_packed_D1_cpu(self):
        self._test_padded_to_packed_helper(1, "cpu")

    def test_padded_to_packed_D16_cpu(self):
        self._test_padded_to_packed_helper(16, "cpu")

    def test_padded_to_packed_flat_cuda(self):
        device = get_random_cuda_device()
        self._test_padded_to_packed_helper(0, device)

    def test_padded_to_packed_D1_cuda(self):
        device = get_random_cuda_device()
        self._test_padded_to_packed_helper(1, device)

    def test_padded_to_packed_D16_cuda(self):
        device = get_random_cuda_device()
        self._test_padded_to_packed_helper(16, device)

    def test_invalid_inputs_shapes(self, device="cuda:0"):
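        # As exercised below, packed_to_padded rejects inputs with more than
        # two dimensions, while padded_to_packed rejects 1D and 4D inputs.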
        with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."):
            values = torch.rand((100, 50, 2), device=device)
            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
            packed_to_padded(values, first_idxs, 100)

        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
            values = torch.rand((100,), device=device)
            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
            padded_to_packed(values, first_idxs, 20)

        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
            values = torch.rand((100, 50, 2, 2), device=device)
            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
            padded_to_packed(values, first_idxs, 20)

    @staticmethod
    def packed_to_padded_with_init(
        num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
    ):
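        """
        Set up random meshes and per-face values once, then return a closure
        that runs packed_to_padded (with CUDA synchronization) so it can be
        timed by a benchmark harness.
        """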
        meshes = TestPackedToPadded.init_meshes(
            num_meshes, num_verts, num_faces, device
        )
        faces = meshes.faces_packed()
        mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
        max_faces = meshes.num_faces_per_mesh().max().item()
        if num_d == 0:
            values = torch.rand((faces.shape[0],), device=meshes.device)
        else:
            values = torch.rand((faces.shape[0], num_d), device=meshes.device)
        torch.cuda.synchronize()

        def out():
            packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces)
            torch.cuda.synchronize()

        return out

    @staticmethod
    def packed_to_padded_with_init_torch(
        num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
    ):
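        """
        Same setup as packed_to_padded_with_init, but the returned closure
        runs the pure-PyTorch reference implementation instead.
        """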
        meshes = TestPackedToPadded.init_meshes(
            num_meshes, num_verts, num_faces, device
        )
        faces = meshes.faces_packed()
        mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
        max_faces = meshes.num_faces_per_mesh().max().item()
        if num_d == 0:
            values = torch.rand((faces.shape[0],), device=meshes.device)
        else:
            values = torch.rand((faces.shape[0], num_d), device=meshes.device)
        torch.cuda.synchronize()

        def out():
            TestPackedToPadded.packed_to_padded_python(
                values, mesh_to_faces_packed_first_idx, max_faces, device
            )
            torch.cuda.synchronize()

        return out
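
# A minimal usage sketch for the benchmark closures above (the call site is
# an assumption; this file only defines the closures):
#
#     fn = TestPackedToPadded.packed_to_padded_with_init(
#         num_meshes=32, num_verts=100, num_faces=300, num_d=16, device="cuda:0"
#     )
#     fn()  # one timed iteration: packed_to_padded + torch.cuda.synchronize()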