# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# @licenselint-loose-mode

# Some of the code below is adapted from Soft Rasterizer (SoftRas)
#
# Copyright (c) 2017 Hiroharu Kato
# Copyright (c) 2018 Nikos Kolotouros
# Copyright (c) 2019 Shichen Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import math
import typing
import unittest
from itertools import product

import numpy as np
import torch
from pytorch3d.common.datatypes import Device
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
from pytorch3d.renderer.cameras import (
    camera_position_from_spherical_angles,
    CamerasBase,
    FoVOrthographicCameras,
    FoVPerspectiveCameras,
    get_world_to_view_transform,
    look_at_rotation,
    look_at_view_transform,
    OpenGLOrthographicCameras,
    OpenGLPerspectiveCameras,
    OrthographicCameras,
    PerspectiveCameras,
    SfMOrthographicCameras,
    SfMPerspectiveCameras,
)
from pytorch3d.renderer.fisheyecameras import FishEyeCameras
from pytorch3d.transforms import Transform3d
from pytorch3d.transforms.rotation_conversions import random_rotations
from pytorch3d.transforms.so3 import so3_exp_map

from .common_testing import TestCaseMixin


# Naive function adapted from SoftRasterizer for test purposes.
def perspective_project_naive(points, fov=60.0):
    """
    Compute perspective projection from a given viewing angle.
    Args:
        points: (N, V, 3) representing the padded points.
        fov: field of view angle in degrees
    Returns:
        (N, V, 3) tensor of projected points preserving the view space z
        coordinate (no z renormalization)
    """
    device = points.device
    halfFov = torch.tensor((fov / 2) / 180 * np.pi, dtype=torch.float32, device=device)
    scale = torch.tan(halfFov[None])
    scale = scale[:, None]
    z = points[:, :, 2]
    x = points[:, :, 0] / z / scale
    y = points[:, :, 1] / z / scale
    points = torch.stack((x, y, z), dim=2)
    return points
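
# Worked example (illustrative only, not used by the tests): with fov=60 deg
# the scale is tan(30 deg) = 1/sqrt(3), so x' = x / (z * tan(fov / 2)).
#
#   pts = torch.tensor([[[1.0, 2.0, 10.0]]])
#   perspective_project_naive(pts, fov=60.0)
#   # -> approximately tensor([[[0.1732, 0.3464, 10.0000]]])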


def sfm_perspective_project_naive(points, fx=1.0, fy=1.0, p0x=0.0, p0y=0.0):
    """
    Compute perspective projection using focal length and principal point.

    Args:
        points: (N, V, 3) representing the padded points.
        fx: world units
        fy: world units
        p0x: pixels
        p0y: pixels
    Returns:
        (N, V, 3) tensor of projected points where the third coordinate
        is the inverse depth 1/z.
    """
    z = points[:, :, 2]
    x = (points[:, :, 0] * fx) / z + p0x
    y = (points[:, :, 1] * fy) / z + p0y
    points = torch.stack((x, y, 1.0 / z), dim=2)
    return points
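
# Note the inverse depth: the third output channel is 1 / z, not z. For
# example (illustrative only), with fx=fy=2.0 and a zero principal point,
# the point (1, 2, 4) maps to (1 * 2 / 4, 2 * 2 / 4, 1 / 4):
#
#   pts = torch.tensor([[[1.0, 2.0, 4.0]]])
#   sfm_perspective_project_naive(pts, fx=2.0, fy=2.0)
#   # -> tensor([[[0.5000, 1.0000, 0.2500]]])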


# Naive function adapted from SoftRasterizer for test purposes.
def orthographic_project_naive(points, scale_xyz=(1.0, 1.0, 1.0)):
    """
    Compute orthographic projection with the given per-axis scale factors.
    Args:
        points: (N, V, 3) representing the padded points.
        scale_xyz: (N, 3) scaling factors for each of the xyz directions
    Returns:
        (N, V, 3) tensor of projected points preserving the view space z
        coordinate (no z renormalization).
    """
    if not torch.is_tensor(scale_xyz):
        scale_xyz = torch.tensor(scale_xyz)
    scale_xyz = scale_xyz.view(-1, 3)
    z = points[:, :, 2]
    x = points[:, :, 0] * scale_xyz[:, 0]
    y = points[:, :, 1] * scale_xyz[:, 1]
    points = torch.stack((x, y, z), dim=2)
    return points
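
# Worked example (illustrative only): orthographic projection simply scales
# the x and y axes and keeps the view space z, so with scale_xyz=(2.0, 0.5, 1.0)
# the point (1, 2, 3) maps to (2, 1, 3):
#
#   pts = torch.tensor([[[1.0, 2.0, 3.0]]])
#   orthographic_project_naive(pts, scale_xyz=(2.0, 0.5, 1.0))
#   # -> tensor([[[2.0000, 1.0000, 3.0000]]])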


def ndc_to_screen_points_naive(points, imsize):
    """
    Transforms points from PyTorch3D's NDC space to screen space
    Args:
        points: (N, V, 3) representing padded points
        imsize: (N, 2) image size = (height, width)
    Returns:
        (N, V, 3) tensor of transformed points
    """
    height, width = imsize.unbind(1)
    width = width.view(-1, 1)
    half_width = width / 2.0
    height = height.view(-1, 1)
    half_height = height / 2.0

    scale = (
        half_width * (height > width).float() + half_height * (height <= width).float()
    )

    x, y, z = points.unbind(2)
    x = -scale * x + half_width
    y = -scale * y + half_height
    return torch.stack((x, y, z), dim=2)
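
# Worked example (illustrative only): PyTorch3D's NDC axes point left (+X) and
# up (+Y) while screen space has its origin at the top-left corner, hence the
# negation; the scale is half the shorter image side, which the NDC range
# [-1, 1] spans. For a 64x64 image the NDC point (1, 1, z) maps to (0, 0, z):
#
#   pts = torch.tensor([[[1.0, 1.0, 5.0]]])
#   ndc_to_screen_points_naive(pts, torch.tensor([[64, 64]]))
#   # -> tensor([[[0.0000, 0.0000, 5.0000]]])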


def init_random_cameras(
    cam_type: typing.Type[CamerasBase],
    batch_size: int,
    random_z: bool = False,
    device: Device = "cpu",
):
    T = torch.randn(batch_size, 3) * 0.03
    if not random_z:
        T[:, 2] = 4
    R = so3_exp_map(torch.randn(batch_size, 3) * 3.0)
    cam_params = {"R": R, "T": T, "device": device}
    if cam_type in (OpenGLPerspectiveCameras, OpenGLOrthographicCameras):
        cam_params["znear"] = torch.rand(batch_size) * 10 + 0.1
        cam_params["zfar"] = torch.rand(batch_size) * 4 + 1 + cam_params["znear"]
        if cam_type == OpenGLPerspectiveCameras:
            cam_params["fov"] = torch.rand(batch_size) * 60 + 30
            cam_params["aspect_ratio"] = torch.rand(batch_size) * 0.5 + 0.5
        else:
            cam_params["top"] = torch.rand(batch_size) * 0.2 + 0.9
            cam_params["bottom"] = -(torch.rand(batch_size)) * 0.2 - 0.9
            cam_params["left"] = -(torch.rand(batch_size)) * 0.2 - 0.9
            cam_params["right"] = torch.rand(batch_size) * 0.2 + 0.9
    elif cam_type in (FoVPerspectiveCameras, FoVOrthographicCameras):
        cam_params["znear"] = torch.rand(batch_size) * 10 + 0.1
        cam_params["zfar"] = torch.rand(batch_size) * 4 + 1 + cam_params["znear"]
        if cam_type == FoVPerspectiveCameras:
            cam_params["fov"] = torch.rand(batch_size) * 60 + 30
            cam_params["aspect_ratio"] = torch.rand(batch_size) * 0.5 + 0.5
        else:
            cam_params["max_y"] = torch.rand(batch_size) * 0.2 + 0.9
            cam_params["min_y"] = -(torch.rand(batch_size)) * 0.2 - 0.9
            cam_params["min_x"] = -(torch.rand(batch_size)) * 0.2 - 0.9
            cam_params["max_x"] = torch.rand(batch_size) * 0.2 + 0.9
    elif cam_type in (
        SfMOrthographicCameras,
        SfMPerspectiveCameras,
        OrthographicCameras,
        PerspectiveCameras,
    ):
        cam_params["focal_length"] = torch.rand(batch_size) * 10 + 0.1
        cam_params["principal_point"] = torch.randn((batch_size, 2))
    elif cam_type == FishEyeCameras:
        cam_params["focal_length"] = torch.rand(batch_size, 1) * 10 + 0.1
        cam_params["principal_point"] = torch.randn((batch_size, 2))
        cam_params["radial_params"] = torch.randn((batch_size, 6))
        cam_params["tangential_params"] = torch.randn((batch_size, 2))
        cam_params["thin_prism_params"] = torch.randn((batch_size, 4))
    else:
        raise ValueError(str(cam_type))
    return cam_type(**cam_params)
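
# Example usage (a sketch): a batch of 4 random PerspectiveCameras on the CPU.
# Rotations come from so3_exp_map; translations keep the cameras roughly 4
# units in front of the origin unless random_z=True.
#
#   cams = init_random_cameras(PerspectiveCameras, batch_size=4)
#   assert len(cams) == 4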


class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(42)

    def test_look_at_view_transform_from_eye_point_tuple(self):
        dist = math.sqrt(2)
        elev = math.pi / 4
        azim = 0.0
        eye = ((0.0, 1.0, 1.0),)
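        # eye = (0, 1, 1) lies in the y-z plane at distance sqrt(2) from the
        # origin, elevated 45 degrees (pi/4 rad), so it describes the same
        # camera position as the (dist, elev, azim) values above.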
        # using passed values for dist, elev, azim
        R, t = look_at_view_transform(dist, elev, azim, degrees=False)
        # using other values for dist, elev, azim - eye overrides
        R_eye, t_eye = look_at_view_transform(dist=3, elev=2, azim=1, eye=eye)
        # using only eye value
        R_eye_only, t_eye_only = look_at_view_transform(eye=eye)
        self.assertTrue(torch.allclose(R, R_eye, atol=2e-7))
        self.assertTrue(torch.allclose(t, t_eye, atol=2e-7))
        self.assertTrue(torch.allclose(R, R_eye_only, atol=2e-7))
        self.assertTrue(torch.allclose(t, t_eye_only, atol=2e-7))

    def test_look_at_view_transform_default_values(self):
        dist = 1.0
        elev = 0.0
        azim = 0.0
        # Using passed values for dist, elev, azim
        R, t = look_at_view_transform(dist, elev, azim)
        # Using default dist=1.0, elev=0.0, azim=0.0
        R_default, t_default = look_at_view_transform()
        # test default = passed = expected
        self.assertTrue(torch.allclose(R, R_default, atol=2e-7))
        self.assertTrue(torch.allclose(t, t_default, atol=2e-7))

    def test_look_at_view_transform_non_default_at_position(self):
        dist = 1.0
        elev = 0.0
        azim = 0.0
        at = ((1, 1, 1),)
        # Using passed values for dist, elev, azim, at
        R, t = look_at_view_transform(dist, elev, azim, at=at)
        # Using default dist=1.0, elev=0.0, azim=0.0
        R_default, t_default = look_at_view_transform()
        # test default = passed = expected
        # R must be the same, t must be translated by (1,-1,1) with respect to t_default
        t_trans = torch.tensor([1, -1, 1], dtype=torch.float32).view(1, 3)
        self.assertTrue(torch.allclose(R, R_default, atol=2e-7))
        self.assertTrue(torch.allclose(t, t_default + t_trans, atol=2e-7))

    def test_camera_position_from_angles_python_scalar(self):
        dist = 2.7
        elev = 90.0
        azim = 0.0
        expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32).view(
            1, 3
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=2e-7)

    def test_camera_position_from_angles_python_scalar_radians(self):
        dist = 2.7
        elev = math.pi / 2
        azim = 0.0
        expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32)
        expected_position = expected_position.view(1, 3)
        position = camera_position_from_spherical_angles(
            dist, elev, azim, degrees=False
        )
        self.assertClose(position, expected_position, atol=2e-7)

    def test_camera_position_from_angles_torch_scalars(self):
        dist = torch.tensor(2.7)
        elev = torch.tensor(0.0)
        azim = torch.tensor(90.0)
        expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
            1, 3
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=2e-7)

    def test_camera_position_from_angles_mixed_scalars(self):
        dist = 2.7
        elev = torch.tensor(0.0)
        azim = 90.0
        expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
            1, 3
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=2e-7)

    def test_camera_position_from_angles_torch_scalar_grads(self):
        dist = torch.tensor(2.7, requires_grad=True)
        elev = torch.tensor(45.0, requires_grad=True)
        azim = torch.tensor(45.0)
        position = camera_position_from_spherical_angles(dist, elev, azim)
        position.sum().backward()
        self.assertTrue(hasattr(elev, "grad"))
        self.assertTrue(hasattr(dist, "grad"))
        elev_grad = elev.grad.clone()
        dist_grad = dist.grad.clone()
        elev = math.pi / 180.0 * elev.detach()
        azim = math.pi / 180.0 * azim
        grad_dist = (
            torch.cos(elev) * torch.sin(azim)
            + torch.sin(elev)
            + torch.cos(elev) * torch.cos(azim)
        )
        grad_elev = (
            -(torch.sin(elev)) * torch.sin(azim)
            + torch.cos(elev)
            - torch.sin(elev) * torch.cos(azim)
        )
        grad_elev = dist * (math.pi / 180.0) * grad_elev
        self.assertClose(elev_grad, grad_elev)
        self.assertClose(dist_grad, grad_dist)

    def test_camera_position_from_angles_vectors(self):
        dist = torch.tensor([2.0, 2.0])
        elev = torch.tensor([0.0, 90.0])
        azim = torch.tensor([90.0, 0.0])
        expected_position = torch.tensor(
            [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=torch.float32
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=2e-7)

    def test_camera_position_from_angles_vectors_broadcast(self):
        dist = torch.tensor([2.0, 3.0, 5.0])
        elev = torch.tensor([0.0])
        azim = torch.tensor([90.0])
        expected_position = torch.tensor(
            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=3e-7)

    def test_camera_position_from_angles_vectors_mixed_broadcast(self):
        dist = torch.tensor([2.0, 3.0, 5.0])
        elev = 0.0
        azim = torch.tensor(90.0)
        expected_position = torch.tensor(
            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=3e-7)

    def test_camera_position_from_angles_vectors_mixed_broadcast_grads(self):
        dist = torch.tensor([2.0, 3.0, 5.0], requires_grad=True)
        elev = torch.tensor(45.0, requires_grad=True)
        azim = 45.0
        position = camera_position_from_spherical_angles(dist, elev, azim)
        position.sum().backward()
        self.assertTrue(hasattr(elev, "grad"))
        self.assertTrue(hasattr(dist, "grad"))
        elev_grad = elev.grad.clone()
        dist_grad = dist.grad.clone()
        azim = torch.tensor(azim)
        elev = math.pi / 180.0 * elev.detach()
        azim = math.pi / 180.0 * azim
        grad_dist = (
            torch.cos(elev) * torch.sin(azim)
            + torch.sin(elev)
            + torch.cos(elev) * torch.cos(azim)
        )
        grad_elev = (
            -(torch.sin(elev)) * torch.sin(azim)
            + torch.cos(elev)
            - torch.sin(elev) * torch.cos(azim)
        )
        grad_elev = (dist * (math.pi / 180.0) * grad_elev).sum()
        self.assertClose(elev_grad, grad_elev)
        self.assertClose(dist_grad, torch.full([3], grad_dist))

    def test_camera_position_from_angles_vectors_bad_broadcast(self):
        # Batch dim for broadcast must be N or 1
        dist = torch.tensor([2.0, 3.0, 5.0])
        elev = torch.tensor([0.0, 90.0])
        azim = torch.tensor([90.0])
        with self.assertRaises(ValueError):
            camera_position_from_spherical_angles(dist, elev, azim)

    def test_look_at_rotation_python_list(self):
        camera_position = [[0.0, 0.0, -1.0]]  # camera pointing along negative z
        rot_mat = look_at_rotation(camera_position)
        self.assertClose(rot_mat, torch.eye(3)[None], atol=2e-7)

    def test_look_at_rotation_input_fail(self):
        camera_position = [-1.0]  # expected to have xyz positions
        with self.assertRaises(ValueError):
            look_at_rotation(camera_position)

    def test_look_at_rotation_list_broadcast(self):
        # fmt: off
        camera_positions = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
        rot_mats_expected = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]
                ],
                [
                    [-1.0, 0.0,  0.0],  # noqa: E241, E201
                    [ 0.0, 1.0,  0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, -1.0]   # noqa: E241, E201
                ],
            ],
            dtype=torch.float32
        )
        # fmt: on
        rot_mats = look_at_rotation(camera_positions)
        self.assertClose(rot_mats, rot_mats_expected, atol=2e-7)

    def test_look_at_rotation_tensor_broadcast(self):
        # fmt: off
        camera_positions = torch.tensor([
            [0.0, 0.0, -1.0],
            [0.0, 0.0,  1.0]   # noqa: E241, E201
        ], dtype=torch.float32)
        rot_mats_expected = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]
                ],
                [
                    [-1.0, 0.0,  0.0],  # noqa: E241, E201
                    [ 0.0, 1.0,  0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, -1.0]   # noqa: E241, E201
                ],
            ],
            dtype=torch.float32
        )
        # fmt: on
        rot_mats = look_at_rotation(camera_positions)
        self.assertClose(rot_mats, rot_mats_expected, atol=2e-7)

    def test_look_at_rotation_tensor_grad(self):
        camera_position = torch.tensor([[0.0, 0.0, -1.0]], requires_grad=True)
        rot_mat = look_at_rotation(camera_position)
        rot_mat.sum().backward()
        self.assertTrue(hasattr(camera_position, "grad"))
        self.assertClose(
            camera_position.grad, torch.zeros_like(camera_position), atol=2e-7
        )

    def test_view_transform(self):
        T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
        R = look_at_rotation(T)
        RT = get_world_to_view_transform(R=R, T=T)
        self.assertTrue(isinstance(RT, Transform3d))

    def test_look_at_view_transform_corner_case(self):
        dist = 2.7
        elev = 90
        azim = 90
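        # elev=90 places the camera directly above the object, where the view
        # direction is (anti)parallel to the default up axis (0, 1, 0) - a
        # near-degenerate configuration for building the look_at rotation.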
        expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32).view(
            1, 3
        )
        position = camera_position_from_spherical_angles(dist, elev, azim)
        self.assertClose(position, expected_position, atol=2e-7)
        R, _ = look_at_view_transform(eye=position)
        x_axis = R[:, :, 0]
        expected_x_axis = torch.tensor([0.0, 0.0, -1.0], dtype=torch.float32).view(1, 3)
        self.assertClose(x_axis, expected_x_axis, atol=5e-3)


class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
    def test_K(self, batch_size=10):
        T = torch.randn(batch_size, 3)
        R = random_rotations(batch_size)
        K = torch.randn(batch_size, 4, 4)
        for cam_type in (
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cam = cam_type(R=R, T=T, K=K)
            cam.get_projection_transform()
            # Just checking that we don't crash or anything
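            # A sketch of the expectation here: when K is given it is used
            # directly as the (N, 4, 4) projection matrix instead of being
            # built from focal length / FoV parameters, so this is only a
            # smoke test that the transform can be constructed.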

    def test_view_transform_class_method(self):
        T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
        R = look_at_rotation(T)
        RT = get_world_to_view_transform(R=R, T=T)
        for cam_type in (
            OpenGLPerspectiveCameras,
            OpenGLOrthographicCameras,
            SfMOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cam = cam_type(R=R, T=T)
            RT_class = cam.get_world_to_view_transform()
            self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix()))

        self.assertTrue(isinstance(RT, Transform3d))

    def test_get_camera_center(self, batch_size=10):
        T = torch.randn(batch_size, 3)
        R = random_rotations(batch_size)
        for cam_type in (
            OpenGLPerspectiveCameras,
            OpenGLOrthographicCameras,
            SfMOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cam = cam_type(R=R, T=T)
            C = cam.get_camera_center()
            C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
            self.assertTrue(torch.allclose(C, C_, atol=1e-05))

    @staticmethod
    def init_equiv_cameras_ndc_screen(cam_type: CamerasBase, batch_size: int):
        T = torch.randn(batch_size, 3) * 0.03
        T[:, 2] = 4
        R = so3_exp_map(torch.randn(batch_size, 3) * 3.0)
        screen_cam_params = {"R": R, "T": T}
        ndc_cam_params = {"R": R, "T": T}
        if cam_type in (OrthographicCameras, PerspectiveCameras):
            fcl = torch.rand((batch_size, 2)) * 3.0 + 0.1
            prc = torch.randn((batch_size, 2)) * 0.2
            # (height, width)
            image_size = torch.randint(low=2, high=64, size=(batch_size, 2))
            # scale
            scale = (image_size.min(dim=1, keepdim=True).values) / 2.0
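            # NDC -> screen conversion used below (s = min(H, W) / 2):
            #   focal_screen = focal_ndc * s
            #   principal_screen = (W, H) / 2 - principal_ndc * s
            # e.g. for (H, W) = (32, 64), s = 16 and an NDC principal point
            # of (0, 0) lands at the pixel-space point (32, 16).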

            ndc_cam_params["focal_length"] = fcl
            ndc_cam_params["principal_point"] = prc
            ndc_cam_params["image_size"] = image_size

            screen_cam_params["image_size"] = image_size
            screen_cam_params["focal_length"] = fcl * scale
            screen_cam_params["principal_point"] = (
                image_size[:, [1, 0]]
            ) / 2.0 - prc * scale
            screen_cam_params["in_ndc"] = False
        else:
            raise ValueError(str(cam_type))
        return cam_type(**ndc_cam_params), cam_type(**screen_cam_params)

    def test_unproject_points(self, batch_size=50, num_points=100):
        """
        Checks that an unprojection of a randomly projected point cloud
        stays the same.
        """

        for cam_type in (
            SfMOrthographicCameras,
            OpenGLPerspectiveCameras,
            OpenGLOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            # init the cameras
            cameras = init_random_cameras(cam_type, batch_size)
            # xyz - the ground truth point cloud
            xyz = torch.randn(batch_size, num_points, 3) * 0.3
            # xyz in camera coordinates
            xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)
            # depth = z-component of xyz_cam
            depth = xyz_cam[:, :, 2:]
            # project xyz
            xyz_proj = cameras.transform_points(xyz)
            xy, cam_depth = xyz_proj.split(2, dim=2)
            # input to the unprojection function
            xy_depth = torch.cat((xy, depth), dim=2)

            for to_world in (False, True):
                if to_world:
                    matching_xyz = xyz
                else:
                    matching_xyz = xyz_cam

                # if we have FoV (= OpenGL) cameras
                # test for scaled_depth_input=True/False
                if cam_type in (
                    OpenGLPerspectiveCameras,
                    OpenGLOrthographicCameras,
                    FoVPerspectiveCameras,
                    FoVOrthographicCameras,
                ):
                    for scaled_depth_input in (True, False):
                        if scaled_depth_input:
                            xy_depth_ = xyz_proj
                        else:
                            xy_depth_ = xy_depth
                        xyz_unproj = cameras.unproject_points(
                            xy_depth_,
                            world_coordinates=to_world,
                            scaled_depth_input=scaled_depth_input,
                        )
                        self.assertTrue(
                            torch.allclose(xyz_unproj, matching_xyz, atol=1e-4)
                        )
                else:
                    xyz_unproj = cameras.unproject_points(
                        xy_depth, world_coordinates=to_world
                    )
                    self.assertTrue(torch.allclose(xyz_unproj, matching_xyz, atol=1e-4))

    @staticmethod
    def unproject_points(
        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
    ):
        """
        Checks that an unprojection of a randomly projected point cloud
        stays the same.
        """
        if device == "cuda":
            device = torch.device("cuda:0")
        else:
            device = torch.device("cpu")

        str2cls = {  # noqa
            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
            "SfMOrthographicCameras": SfMOrthographicCameras,
            "SfMPerspectiveCameras": SfMPerspectiveCameras,
            "FoVOrthographicCameras": FoVOrthographicCameras,
            "FoVPerspectiveCameras": FoVPerspectiveCameras,
            "OrthographicCameras": OrthographicCameras,
            "PerspectiveCameras": PerspectiveCameras,
            "FishEyeCameras": FishEyeCameras,
        }

        def run_cameras():
            # init the cameras
            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
            # xyz - the ground truth point cloud
            xyz = torch.randn(num_points, 3) * 0.3
            xyz = cameras.unproject_points(xyz, scaled_depth_input=True)

        return run_cameras

    def test_project_points_screen(self, batch_size=50, num_points=100):
        """
        Checks that an unprojection of a randomly projected point cloud
        stays the same.
        """

        for cam_type in (
            OpenGLOrthographicCameras,
            OpenGLPerspectiveCameras,
            SfMOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):

            # init the cameras
            cameras = init_random_cameras(cam_type, batch_size)
            # xyz - the ground truth point cloud
            xy = torch.randn(batch_size, num_points, 2) * 2.0 - 1.0
            z = torch.randn(batch_size, num_points, 1) * 3.0 + 1.0
            xyz = torch.cat((xy, z), dim=2)
            # image size
            image_size = torch.randint(low=32, high=64, size=(batch_size, 2))
            # project points
            xyz_project_ndc = cameras.transform_points_ndc(xyz)
            xyz_project_screen = cameras.transform_points_screen(
                xyz, image_size=image_size
            )
            # naive
            xyz_project_screen_naive = ndc_to_screen_points_naive(
                xyz_project_ndc, image_size
            )
            # we set atol to 1e-4, remember that screen points are in [0, W]x[0, H] space
            self.assertClose(xyz_project_screen, xyz_project_screen_naive, atol=1e-4)

    @staticmethod
    def transform_points(
        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
    ):
        """
        Checks that an unprojection of a randomly projected point cloud
        stays the same.
        """

        if device == "cuda":
            device = torch.device("cuda:0")
        else:
            device = torch.device("cpu")
        str2cls = {  # noqa
            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
            "SfMOrthographicCameras": SfMOrthographicCameras,
            "SfMPerspectiveCameras": SfMPerspectiveCameras,
            "FoVOrthographicCameras": FoVOrthographicCameras,
            "FoVPerspectiveCameras": FoVPerspectiveCameras,
            "OrthographicCameras": OrthographicCameras,
            "PerspectiveCameras": PerspectiveCameras,
            "FishEyeCameras": FishEyeCameras,
        }

        def run_cameras():
            # init the cameras
            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
            # xyz - the ground truth point cloud
            xy = torch.randn(num_points, 2) * 2.0 - 1.0
            z = torch.randn(num_points, 1) * 3.0 + 1.0
            xyz = torch.cat((xy, z), dim=-1)
            xy = cameras.transform_points(xyz)

        return run_cameras

    def test_equiv_project_points(self, batch_size=50, num_points=100):
        """
        Checks that NDC and screen cameras project points to ndc correctly.
        Applies only to OrthographicCameras and PerspectiveCameras.
        """
        for cam_type in (OrthographicCameras, PerspectiveCameras):
            # init the cameras
            (
                ndc_cameras,
                screen_cameras,
            ) = TestCamerasCommon.init_equiv_cameras_ndc_screen(cam_type, batch_size)
            # xyz - the ground truth point cloud in Py3D space
            xy = torch.randn(batch_size, num_points, 2) * 0.3
            z = torch.rand(batch_size, num_points, 1) + 3.0 + 0.1
            xyz = torch.cat((xy, z), dim=2)
            # project points
            xyz_ndc = ndc_cameras.transform_points_ndc(xyz)
            xyz_screen = screen_cameras.transform_points_ndc(xyz)
            # check correctness
            self.assertClose(xyz_ndc, xyz_screen, atol=1e-5)

    def test_clone(self, batch_size: int = 10):
        """
        Checks the clone function of the cameras.
        """
        for cam_type in (
            SfMOrthographicCameras,
            OpenGLPerspectiveCameras,
            OpenGLOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cameras = init_random_cameras(cam_type, batch_size)
            cameras = cameras.to(torch.device("cpu"))
            cameras_clone = cameras.clone()

            for var in cameras.__dict__.keys():
                val = getattr(cameras, var)
                val_clone = getattr(cameras_clone, var)
                if torch.is_tensor(val):
                    self.assertClose(val, val_clone)
                    self.assertSeparate(val, val_clone)
                else:
                    self.assertTrue(val == val_clone)

    def test_join_cameras_as_batch_errors(self):
        cam0 = PerspectiveCameras(device="cuda:0")
        cam1 = OrthographicCameras(device="cuda:0")

        # Cameras not of the same type
        with self.assertRaisesRegex(ValueError, "same type"):
            join_cameras_as_batch([cam0, cam1])

        cam2 = OrthographicCameras(device="cpu")
        # Cameras not on the same device
        with self.assertRaisesRegex(ValueError, "same device"):
            join_cameras_as_batch([cam1, cam2])

        cam3 = OrthographicCameras(in_ndc=False, device="cuda:0")
        # Different coordinate systems -- all should be in ndc or in screen
        with self.assertRaisesRegex(
            ValueError, "Attribute _in_ndc is not constant across inputs"
        ):
            join_cameras_as_batch([cam1, cam3])

    def join_cameras_as_batch_fov(self, camera_cls):
        R0 = torch.randn((6, 3, 3))
        R1 = torch.randn((3, 3, 3))
        cam0 = camera_cls(znear=10.0, zfar=100.0, R=R0, device="cuda:0")
        cam1 = camera_cls(znear=10.0, zfar=200.0, R=R1, device="cuda:0")

        cam_batch = join_cameras_as_batch([cam0, cam1])

        self.assertEqual(cam_batch._N, cam0._N + cam1._N)
        self.assertEqual(cam_batch.device, cam0.device)
        self.assertClose(cam_batch.R, torch.cat((R0, R1), dim=0).to(device="cuda:0"))

    def join_cameras_as_batch(self, camera_cls):
        R0 = torch.randn((6, 3, 3))
        R1 = torch.randn((3, 3, 3))
        p0 = torch.randn((6, 2, 1))
        p1 = torch.randn((3, 2, 1))
        f0 = 5.0
        f1 = torch.randn(3, 2)
        f2 = torch.randn(3, 1)
        cam0 = camera_cls(
            R=R0,
            focal_length=f0,
            principal_point=p0,
        )
        cam1 = camera_cls(
            R=R1,
            focal_length=f0,
            principal_point=p1,
        )
        cam2 = camera_cls(
            R=R1,
            focal_length=f1,
            principal_point=p1,
        )
        cam3 = camera_cls(
            R=R1,
            focal_length=f2,
            principal_point=p1,
        )
        cam_batch = join_cameras_as_batch([cam0, cam1])

        self.assertEqual(cam_batch._N, cam0._N + cam1._N)
        self.assertEqual(cam_batch.device, cam0.device)
        self.assertClose(cam_batch.R, torch.cat((R0, R1), dim=0))
        self.assertClose(cam_batch.principal_point, torch.cat((p0, p1), dim=0))
        self.assertEqual(cam_batch._in_ndc, cam0._in_ndc)

        # Test one broadcasted value and one fixed value
        # Focal length as (N,) in one camera and (N, 2) in the other
        cam_batch = join_cameras_as_batch([cam0, cam2])
        self.assertEqual(cam_batch._N, cam0._N + cam2._N)
        self.assertClose(cam_batch.R, torch.cat((R0, R1), dim=0))
        self.assertClose(
            cam_batch.focal_length,
            torch.cat([torch.tensor([[f0, f0]]).expand(6, -1), f1], dim=0),
        )

        # Focal length as (N, 1) in one camera and (N, 2) in the other
        cam_batch = join_cameras_as_batch([cam2, cam3])
        self.assertClose(
            cam_batch.focal_length,
            torch.cat([f1, f2.expand(-1, 2)], dim=0),
        )

    def test_join_batch_perspective(self):
        self.join_cameras_as_batch_fov(FoVPerspectiveCameras)
        self.join_cameras_as_batch(PerspectiveCameras)

    def test_join_batch_orthographic(self):
        self.join_cameras_as_batch_fov(FoVOrthographicCameras)
        self.join_cameras_as_batch(OrthographicCameras)


############################################################
#                FoVPerspective Camera                     #
############################################################


class TestFoVPerspectiveProjection(TestCaseMixin, unittest.TestCase):
    def test_perspective(self):
        far = 10.0
        near = 1.0
        cameras = FoVPerspectiveCameras(znear=near, zfar=far, fov=60.0)
        P = cameras.get_projection_transform()
        # vertices are at the far clipping plane so z gets mapped to 1.
        vertices = torch.tensor([1, 2, far], dtype=torch.float32)
        projected_verts = torch.tensor(
            [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
        )
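        # With fov=60 deg the projection scale is cot(30 deg) = sqrt(3), so
        # x' = sqrt(3) * x / z, giving sqrt(3)/far and 2*sqrt(3)/far above.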
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        v2 = perspective_project_naive(vertices, fov=60.0)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(far * v1[..., 2], v2[..., 2])
        self.assertClose(v1.squeeze(), projected_verts)

        # vertices are at the near clipping plane so z gets mapped to 0.0.
        vertices[..., 2] = near
        projected_verts = torch.tensor(
            [np.sqrt(3) / near, 2 * np.sqrt(3) / near, 0.0], dtype=torch.float32
        )
        v1 = P.transform_points(vertices)
        v2 = perspective_project_naive(vertices, fov=60.0)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v1.squeeze(), projected_verts)

    def test_perspective_kwargs(self):
        cameras = FoVPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)
        # Override defaults by passing in values to get_projection_transform
        far = 10.0
        P = cameras.get_projection_transform(znear=1.0, zfar=far, fov=60.0)
        vertices = torch.tensor([1, 2, far], dtype=torch.float32)
        projected_verts = torch.tensor(
            [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
        )
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        self.assertClose(v1.squeeze(), projected_verts)

    def test_perspective_mixed_inputs_broadcast(self):
        far = torch.tensor([10.0, 20.0], dtype=torch.float32)
        near = 1.0
        fov = torch.tensor(60.0)
        cameras = FoVPerspectiveCameras(znear=near, zfar=far, fov=fov)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
        z1 = 1.0  # vertices at far clipping plane so z = 1.0
        z2 = (20.0 / (20.0 - 1.0) * 10.0 + -20.0 / (20.0 - 1.0)) / 10.0
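        # The FoV perspective depth mapping is z_ndc = f * (z - n) / ((f - n) * z);
        # with n=1, f=20, z=10 this is 18/19 ~= 0.947, which z2 computes above.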
        projected_verts = torch.tensor(
            [
                [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z1],
                [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z2],
            ],
            dtype=torch.float32,
        )
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        v2 = perspective_project_naive(vertices, fov=60.0)
        self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
        self.assertClose(v1.squeeze(), projected_verts)

    def test_perspective_mixed_inputs_grad(self):
        far = torch.tensor([10.0])
        near = 1.0
        fov = torch.tensor(60.0, requires_grad=True)
        cameras = FoVPerspectiveCameras(znear=near, zfar=far, fov=fov)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
        vertices_batch = vertices[None, None, :]
        v1 = P.transform_points(vertices_batch).squeeze()
        v1.sum().backward()
        self.assertTrue(hasattr(fov, "grad"))
        fov_grad = fov.grad.clone()
        half_fov_rad = (math.pi / 180.0) * fov.detach() / 2.0
        grad_cotan = -(1.0 / (torch.sin(half_fov_rad) ** 2.0) * 1 / 2.0)
        grad_fov = (math.pi / 180.0) * grad_cotan
        grad_fov = (vertices[0] + vertices[1]) * grad_fov / 10.0
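        # x' = cot(fov/2) * x / z, so the expected gradient follows from
        # d/dtheta cot(theta) = -1 / sin(theta)^2 with theta = fov / 2 plus
        # the degrees-to-radians chain-rule factors applied above.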
        self.assertClose(fov_grad, grad_fov)

    def test_camera_class_init(self):
        device = torch.device("cuda:0")
        cam = FoVPerspectiveCameras(znear=10.0, zfar=(100.0, 200.0))

        # Check broadcasting
        self.assertTrue(cam.znear.shape == (2,))
        self.assertTrue(cam.zfar.shape == (2,))

        # Test to
        new_cam = cam.to(device=device)
        self.assertTrue(new_cam.device == device)

    def test_getitem(self):
        N_CAMERAS = 6
        R_matrix = torch.randn((N_CAMERAS, 3, 3))
        cam = FoVPerspectiveCameras(znear=10.0, zfar=100.0, R=R_matrix)

        # Check get item returns an instance of the same class
        # with all the same keys
        c0 = cam[0]
        self.assertTrue(isinstance(c0, FoVPerspectiveCameras))
        self.assertEqual(cam.__dict__.keys(), c0.__dict__.keys())

        # Check all fields correct in get item with int index
        self.assertEqual(len(c0), 1)
        self.assertClose(c0.zfar, torch.tensor([100.0]))
        self.assertClose(c0.znear, torch.tensor([10.0]))
        self.assertClose(c0.R, R_matrix[0:1, ...])
        self.assertEqual(c0.device, torch.device("cpu"))

        # Check list(int) index
        c012 = cam[[0, 1, 2]]
        self.assertEqual(len(c012), 3)
        self.assertClose(c012.zfar, torch.tensor([100.0] * 3))
        self.assertClose(c012.znear, torch.tensor([10.0] * 3))
        self.assertClose(c012.R, R_matrix[0:3, ...])

        # Check torch.LongTensor index
        SLICE = [1, 3, 5]
        index = torch.tensor(SLICE, dtype=torch.int64)
        c135 = cam[index]
        self.assertEqual(len(c135), 3)
        self.assertClose(c135.zfar, torch.tensor([100.0] * 3))
        self.assertClose(c135.znear, torch.tensor([10.0] * 3))
        self.assertClose(c135.R, R_matrix[SLICE, ...])

        # Check torch.BoolTensor index
        bool_slice = [i in SLICE for i in range(N_CAMERAS)]
        index = torch.tensor(bool_slice, dtype=torch.bool)
        c135 = cam[index]
        self.assertEqual(len(c135), 3)
        self.assertClose(c135.zfar, torch.tensor([100.0] * 3))
        self.assertClose(c135.znear, torch.tensor([10.0] * 3))
        self.assertClose(c135.R, R_matrix[SLICE, ...])

        # Check errors with get item
        with self.assertRaisesRegex(ValueError, "out of bounds"):
            cam[N_CAMERAS]

        with self.assertRaisesRegex(ValueError, "does not match cameras"):
            index = torch.tensor([1, 0, 1], dtype=torch.bool)
            cam[index]

        with self.assertRaisesRegex(ValueError, "Invalid index type"):
            cam[slice(0, 1)]

        with self.assertRaisesRegex(ValueError, "Invalid index type"):
            cam[[True, False]]

        with self.assertRaisesRegex(ValueError, "Invalid index type"):
            index = torch.tensor(SLICE, dtype=torch.float32)
            cam[index]

    def test_get_full_transform(self):
        cam = FoVPerspectiveCameras()
        T = torch.tensor([0.0, 0.0, 1.0]).view(1, -1)
        R = look_at_rotation(T)
        P = cam.get_full_projection_transform(R=R, T=T)
        self.assertTrue(isinstance(P, Transform3d))
        self.assertClose(cam.R, R)
        self.assertClose(cam.T, T)

    def test_transform_points(self):
        # Check transform_points methods works with default settings for
        # RT and P
        far = 10.0
        cam = FoVPerspectiveCameras(znear=1.0, zfar=far, fov=60.0)
        points = torch.tensor([1, 2, far], dtype=torch.float32)
        points = points.view(1, 1, 3).expand(5, 10, -1)
        projected_points = torch.tensor(
            [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
        )
        projected_points = projected_points.view(1, 1, 3).expand(5, 10, -1)
        new_points = cam.transform_points(points)
        self.assertClose(new_points, projected_points)

    def test_perspective_type(self):
        cam = FoVPerspectiveCameras(znear=1.0, zfar=10.0, fov=60.0)
        self.assertTrue(cam.is_perspective())
        self.assertEqual(cam.get_znear(), 1.0)


############################################################
#                FoVOrthographic Camera                    #
############################################################


class TestFoVOrthographicProjection(TestCaseMixin, unittest.TestCase):
    def test_orthographic(self):
        far = 10.0
        near = 1.0
        cameras = FoVOrthographicCameras(znear=near, zfar=far)
        P = cameras.get_projection_transform()

        vertices = torch.tensor([1, 2, far], dtype=torch.float32)
        projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v1.squeeze(), projected_verts)

        vertices[..., 2] = near
        projected_verts[2] = 0.0
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v1.squeeze(), projected_verts)

    def test_orthographic_scaled(self):
        vertices = torch.tensor([1, 2, 0.5], dtype=torch.float32)
        vertices = vertices[None, None, :]
        scale = torch.tensor([[2.0, 0.5, 20]])
        # applying the scale puts the z coordinate at the far clipping plane,
        # so z is mapped to 1.0
        projected_verts = torch.tensor([2, 1, 1], dtype=torch.float32)
        cameras = FoVOrthographicCameras(znear=1.0, zfar=10.0, scale_xyz=scale)
        P = cameras.get_projection_transform()
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices, scale)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v1, projected_verts[None, None])

    def test_orthographic_kwargs(self):
        cameras = FoVOrthographicCameras(znear=5.0, zfar=100.0)
        far = 10.0
        P = cameras.get_projection_transform(znear=1.0, zfar=far)
        vertices = torch.tensor([1, 2, far], dtype=torch.float32)
        projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        self.assertClose(v1.squeeze(), projected_verts)

    def test_orthographic_mixed_inputs_broadcast(self):
        far = torch.tensor([10.0, 20.0])
        near = 1.0
        cameras = FoVOrthographicCameras(znear=near, zfar=far)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
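        # expected depth for the second camera (znear=1, zfar=20):
        # z_ndc = z / (zfar - znear) - znear / (zfar - znear)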
        z2 = 10.0 / (20.0 - 1.0) - 1.0 / (20.0 - 1.0)
        projected_verts = torch.tensor(
            [[1.0, 2.0, 1.0], [1.0, 2.0, z2]], dtype=torch.float32
        )
        vertices = vertices[None, None, :]
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)
        self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
        self.assertClose(v1.squeeze(), projected_verts)

    def test_orthographic_mixed_inputs_grad(self):
        far = torch.tensor([10.0])
        near = 1.0
        scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
        cameras = FoVOrthographicCameras(znear=near, zfar=far, scale_xyz=scale)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
        vertices_batch = vertices[None, None, :]
        v1 = P.transform_points(vertices_batch)
        v1.sum().backward()
        self.assertTrue(hasattr(scale, "grad"))
        scale_grad = scale.grad.clone()
        grad_scale = torch.tensor(
            [
                [
                    vertices[0] * P._matrix[:, 0, 0],
                    vertices[1] * P._matrix[:, 1, 1],
                    vertices[2] * P._matrix[:, 2, 2],
                ]
            ]
        )
        self.assertClose(scale_grad, grad_scale)
    def test_perspective_type(self):
        cam = FoVOrthographicCameras(znear=1.0, zfar=10.0)
        self.assertFalse(cam.is_perspective())
        self.assertEqual(cam.get_znear(), 1.0)
    def test_getitem(self):
        R_matrix = torch.randn((6, 3, 3))
        scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
        cam = FoVOrthographicCameras(
            znear=10.0, zfar=100.0, R=R_matrix, scale_xyz=scale
        )

        # Check get item returns an instance of the same class
        # with all the same keys
        c0 = cam[0]
        self.assertTrue(isinstance(c0, FoVOrthographicCameras))
        self.assertEqual(cam.__dict__.keys(), c0.__dict__.keys())

        # Check torch.LongTensor index
        index = torch.tensor([1, 3, 5], dtype=torch.int64)
        c135 = cam[index]
        self.assertEqual(len(c135), 3)
        self.assertClose(c135.zfar, torch.tensor([100.0] * 3))
        self.assertClose(c135.znear, torch.tensor([10.0] * 3))
        self.assertClose(c135.min_x, torch.tensor([-1.0] * 3))
        self.assertClose(c135.max_x, torch.tensor([1.0] * 3))
        self.assertClose(c135.R, R_matrix[[1, 3, 5], ...])
        self.assertClose(c135.scale_xyz, scale.expand(3, -1))

############################################################
#                Orthographic Camera                       #
############################################################


class TestOrthographicProjection(TestCaseMixin, unittest.TestCase):
    def test_orthographic(self):
        cameras = OrthographicCameras()
        P = cameras.get_projection_transform()

        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        projected_verts = vertices.clone()
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(vertices)

        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v1, projected_verts)

    def test_orthographic_scaled(self):
        focal_length_x = 10.0
        focal_length_y = 15.0

        cameras = OrthographicCameras(focal_length=((focal_length_x, focal_length_y),))
        P = cameras.get_projection_transform()

        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        projected_verts = vertices.clone()
        projected_verts[:, :, 0] *= focal_length_x
        projected_verts[:, :, 1] *= focal_length_y
        v1 = P.transform_points(vertices)
        v2 = orthographic_project_naive(
            vertices, scale_xyz=(focal_length_x, focal_length_y, 1.0)
        )
        v3 = cameras.transform_points(vertices)
        self.assertClose(v1[..., :2], v2[..., :2])
        self.assertClose(v3[..., :2], v2[..., :2])
        self.assertClose(v1, projected_verts)
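
    # A minimal sketch (not part of the original suite) of the mapping the
    # scaled test above exercises, assuming OrthographicCameras applies
    # (x, y) -> (fx * x + px, fy * y + py) and leaves z unchanged.
    def _naive_screen_ortho_project(self, points, fx=1.0, fy=1.0, px=0.0, py=0.0):
        scale = points.new_tensor([fx, fy, 1.0])
        shift = points.new_tensor([px, py, 0.0])
        return points * scale + shift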

    def test_orthographic_kwargs(self):
        cameras = OrthographicCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
        P = cameras.get_projection_transform(
            focal_length=2.0, principal_point=((2.5, 3.5),)
        )
        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        projected_verts = vertices.clone()
        projected_verts[:, :, :2] *= 2.0
        projected_verts[:, :, 0] += 2.5
        projected_verts[:, :, 1] += 3.5
        v1 = P.transform_points(vertices)
        self.assertClose(v1, projected_verts)
    def test_perspective_type(self):
        cam = OrthographicCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
        self.assertFalse(cam.is_perspective())
        self.assertIsNone(cam.get_znear())
    def test_getitem(self):
        R_matrix = torch.randn((6, 3, 3))
        principal_point = torch.randn((6, 2, 1))
        focal_length = 5.0
        cam = OrthographicCameras(
            R=R_matrix,
            focal_length=focal_length,
            principal_point=principal_point,
        )

        # Check get item returns an instance of the same class
        # with all the same keys
        c0 = cam[0]
        self.assertTrue(isinstance(c0, OrthographicCameras))
        self.assertEqual(cam.__dict__.keys(), c0.__dict__.keys())

        # Check torch.LongTensor index
        index = torch.tensor([1, 3, 5], dtype=torch.int64)
        c135 = cam[index]
        self.assertEqual(len(c135), 3)
        self.assertClose(c135.focal_length, torch.tensor([[5.0, 5.0]] * 3))
        self.assertClose(c135.R, R_matrix[[1, 3, 5], ...])
        self.assertClose(c135.principal_point, principal_point[[1, 3, 5], ...])

############################################################
#                Perspective Camera                        #
############################################################


class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
    def test_perspective(self):
        cameras = PerspectiveCameras()
        P = cameras.get_projection_transform()

        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        v1 = P.transform_points(vertices)
        v2 = sfm_perspective_project_naive(vertices)
        self.assertClose(v1, v2)
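
    # A minimal sketch (not part of the original suite) of the projection
    # compared against sfm_perspective_project_naive above, assuming the SfM
    # convention u = fx * x / z + px, v = fy * y / z + py, with 1 / z kept in
    # the third channel for depth ordering.
    def _naive_sfm_project(self, points, fx=1.0, fy=1.0, px=0.0, py=0.0):
        z = points[..., 2:]
        uv = points[..., :2] * points.new_tensor([fx, fy]) / z + points.new_tensor(
            [px, py]
        )
        return torch.cat([uv, 1.0 / z], dim=-1)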

    def test_perspective_scaled(self):
        focal_length_x = 10.0
        focal_length_y = 15.0
        p0x = 15.0
        p0y = 30.0

        cameras = PerspectiveCameras(
            focal_length=((focal_length_x, focal_length_y),),
            principal_point=((p0x, p0y),),
        )
        P = cameras.get_projection_transform()

        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        v1 = P.transform_points(vertices)
        v2 = sfm_perspective_project_naive(
            vertices, fx=focal_length_x, fy=focal_length_y, p0x=p0x, p0y=p0y
        )
        v3 = cameras.transform_points(vertices)
        self.assertClose(v1, v2)
        self.assertClose(v3[..., :2], v2[..., :2])

    def test_perspective_kwargs(self):
        cameras = PerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
        P = cameras.get_projection_transform(
            focal_length=2.0, principal_point=((2.5, 3.5),)
        )
        vertices = torch.randn([3, 4, 3], dtype=torch.float32)
        v1 = P.transform_points(vertices)
        v2 = sfm_perspective_project_naive(vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5)
        self.assertClose(v1, v2, atol=1e-6)

    def test_perspective_type(self):
        cam = PerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
        self.assertTrue(cam.is_perspective())
        self.assertIsNone(cam.get_znear())

    def test_getitem(self):
        R_matrix = torch.randn((6, 3, 3))
        principal_point = torch.randn((6, 2, 1))
        focal_length = 5.0
        cam = PerspectiveCameras(
            R=R_matrix,
            focal_length=focal_length,
            principal_point=principal_point,
        )

        # Check get item returns an instance of the same class
        # with all the same keys
        c0 = cam[0]
        self.assertTrue(isinstance(c0, PerspectiveCameras))
        self.assertEqual(cam.__dict__.keys(), c0.__dict__.keys())

        # Check torch.LongTensor index
        index = torch.tensor([1, 3, 5], dtype=torch.int64)
        c135 = cam[index]
        self.assertEqual(len(c135), 3)
        self.assertClose(c135.focal_length, torch.tensor([[5.0, 5.0]] * 3))
        self.assertClose(c135.R, R_matrix[[1, 3, 5], ...])
        self.assertClose(c135.principal_point, principal_point[[1, 3, 5], ...])

        # Check in_ndc is handled correctly
        self.assertEqual(cam._in_ndc, c0._in_ndc)


############################################################
#                FishEye Camera                        #
############################################################


class TestFishEyeProjection(TestCaseMixin, unittest.TestCase):
    def setUpSimpleCase(self) -> typing.Tuple[torch.Tensor, ...]:
        super().setUp()
        focal = torch.tensor([[240]], dtype=torch.float32)
        principal_point = torch.tensor([[320, 240]])
        p_3d = torch.tensor(
            [
                [2.0, 3.0, 1.0],
                [3.0, 2.0, 1.0],
            ],
            dtype=torch.float32,
        )
        return focal, principal_point, p_3d

    def setUpAriaCase(self) -> typing.Tuple[torch.Tensor, ...]:
        super().setUp()
        torch.manual_seed(42)
        focal = torch.tensor([[608.9255557152]], dtype=torch.float32)
        principal_point = torch.tensor(
            [[712.0114821205, 706.8666571177]], dtype=torch.float32
        )
        radial_params = torch.tensor(
            [
                [
                    0.3877090026,
                    -0.315613384,
                    -0.3434984955,
                    1.8565874201,
                    -2.1799372221,
                    0.7713834763,
                ],
            ],
            dtype=torch.float32,
        )
        tangential_params = torch.tensor(
            [[-0.0002747019, 0.0005228974]], dtype=torch.float32
        )
        thin_prism_params = torch.tensor(
            [
                [0.000134884, -0.000084822, -0.0009420014, -0.0001276838],
            ],
            dtype=torch.float32,
        )
        return (
            focal,
            principal_point,
            radial_params,
            tangential_params,
            thin_prism_params,
        )

    def setUpBatchCameras(self, combination=None) -> FishEyeCameras:
        super().setUp()
        focal, principal_point, p_3d = self.setUpSimpleCase()
        radial_params = torch.tensor(
            [
                [0, 0, 0, 0, 0, 0],
            ],
            dtype=torch.float32,
        )
        tangential_params = torch.tensor([[0, 0]], dtype=torch.float32)
        thin_prism_params = torch.tensor([[0, 0, 0, 0]], dtype=torch.float32)
        (
            focal1,
            principal_point1,
            radial_params1,
            tangential_params1,
            thin_prism_params1,
        ) = self.setUpAriaCase()
        focal = torch.cat([focal, focal1], dim=0)
        principal_point = torch.cat([principal_point, principal_point1], dim=0)
        radial_params = torch.cat([radial_params, radial_params1], dim=0)
        tangential_params = torch.cat([tangential_params, tangential_params1], dim=0)
        thin_prism_params = torch.cat([thin_prism_params, thin_prism_params1], dim=0)
        if combination is None:
            combination = [True, True, True]
        cameras = FishEyeCameras(
            use_radial=combination[0],
            use_tangential=combination[1],
            use_thin_prism=combination[2],
            focal_length=focal,
            principal_point=principal_point,
            radial_params=radial_params,
            tangential_params=tangential_params,
            thin_prism_params=thin_prism_params,
        )

        return cameras

    def test_distortion_params_set_to_zeros(self):
        # test case 1: all distortion params are 0. Note that
        # setting radial_params to zeros is not equivalent to
        # disabling radial distortion; passing use_radial=False
        # is what disables it
        focal, principal_point, p_3d = self.setUpSimpleCase()
        cameras = FishEyeCameras(
            focal_length=focal,
            principal_point=principal_point,
        )
        uv_case1 = cameras.transform_points(p_3d)
        self.assertClose(
            uv_case1,
            torch.tensor(
                [[493.0993, 499.6489, 1.0], [579.6489, 413.0993, 1.0]],
            ),
        )
        # test case 2: equivalent of test case 1 by
        # disabling use_tangential and use_thin_prism
        cameras = FishEyeCameras(
            focal_length=focal,
            principal_point=principal_point,
            use_tangential=False,
            use_thin_prism=False,
        )
        uv_case2 = cameras.transform_points(p_3d)
        self.assertClose(uv_case2, uv_case1)
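
    # A minimal sketch (not part of the original suite) of the mapping behind
    # the expected values above, assuming the equidistant fisheye model with
    # zero distortion coefficients: with r = sqrt(x^2 + y^2) and
    # theta = atan2(r, z), the image point is (f * theta * x / r + cx,
    # f * theta * y / r + cy).
    def _naive_equidistant_project(self, points, f, cx, cy):
        r = points[..., :2].norm(dim=-1, keepdim=True)
        theta = torch.atan2(r, points[..., 2:])
        uv = f * theta * points[..., :2] / r
        return uv + points.new_tensor([cx, cy])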

    def test_fisheye_against_perspective_cameras(self):
        # test case: check equivalence with PerspectiveCameras
        # by disabling all distortions
        focal, principal_point, p_3d = self.setUpSimpleCase()
        cameras = PerspectiveCameras(
            focal_length=focal,
            principal_point=principal_point,
        )
        P = cameras.get_projection_transform()
        uv_perspective = P.transform_points(p_3d)

        # disable all distortions
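        # with all three distortion switches off, the fisheye model reduces to
        # the pinhole projection u = fx * x / z + cx, v = fy * y / z + cy,
        # so the two cameras must agree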
        cameras = FishEyeCameras(
            focal_length=focal,
            principal_point=principal_point,
            use_radial=False,
            use_tangential=False,
            use_thin_prism=False,
        )
        uv = cameras.transform_points(p_3d)
        self.assertClose(uv, uv_perspective)

    def test_project_shape_broadcasts(self):
        focal, principal_point, p_3d = self.setUpSimpleCase()
        torch.set_printoptions(precision=6)
        combinations = product([0, 1], repeat=3)
        for combination in combinations:
            cameras = FishEyeCameras(
                use_radial=combination[0],
                use_tangential=combination[1],
                use_thin_prism=combination[2],
                focal_length=focal,
                principal_point=principal_point,
            )
            # test case 1:
            # 1 transform with points of shape (P, 3) -> (P, 3)
            # 1 transform with points of shape (1, P, 3) -> (1, P, 3)
            # 1 transform with points of shape (M, P, 3) -> (M, P, 3)
            points = p_3d.repeat(1, 1, 1)
            cameras = FishEyeCameras(
                focal_length=focal,
                principal_point=principal_point,
                use_radial=False,
                use_tangential=False,
                use_thin_prism=False,
            )
            uv = cameras.transform_points(p_3d)
            uv_point_batch = cameras.transform_points(points)
            self.assertClose(uv_point_batch, uv.repeat(1, 1, 1))

        points = p_3d.repeat(3, 1, 1)
        uv_point_batch = cameras.transform_points(points)
        self.assertClose(uv_point_batch, uv.repeat(3, 1, 1))

        # test case 2
        # test with N transforms and points of shape (P, 3) -> (N, P, 3)
        # test with N transforms and points of shape (1, P, 3) -> (N, P, 3)
        torch.set_printoptions(sci_mode=False)
        p_3d = torch.tensor(
            [
                [2.0, 3.0, 1.0],
                [3.0, 2.0, 1.0],
            ]
        )
        expected_res = torch.tensor(
            [
                [
                    [
                        [800.000000, 960.000000, 1.000000],
                        [1040.000000, 720.000000, 1.000000],
                    ],
                    [
                        [1929.862549, 2533.643311, 1.000000],
                        [2538.788086, 1924.717773, 1.000000],
                    ],
                ],
                [
                    [
                        [800.000000, 960.000000, 1.000000],
                        [1040.000000, 720.000000, 1.000000],
                    ],
                    [
                        [1927.272095, 2524.220459, 1.000000],
                        [2536.197754, 1915.295166, 1.000000],
                    ],
                ],
                [
                    [
                        [800.000000, 960.000000, 1.000000],
                        [1040.000000, 720.000000, 1.000000],
                    ],
                    [
                        [1930.050293, 2538.434814, 1.000000],
                        [2537.956543, 1927.569092, 1.000000],
                    ],
                ],
                [
                    [
                        [800.000000, 960.000000, 1.000000],
                        [1040.000000, 720.000000, 1.000000],
                    ],
                    [
                        [1927.459839, 2529.011963, 1.000000],
                        [2535.366211, 1918.146484, 1.000000],
                    ],
                ],
                [
                    [
                        [493.099304, 499.648926, 1.000000],
                        [579.648926, 413.099304, 1.000000],
                    ],
                    [
                        [1662.673950, 2132.860352, 1.000000],
                        [2138.005127, 1657.529053, 1.000000],
                    ],
                ],
                [
                    [
                        [493.099304, 499.648926, 1.000000],
                        [579.648926, 413.099304, 1.000000],
                    ],
                    [
                        [1660.083496, 2123.437744, 1.000000],
                        [2135.414795, 1648.106445, 1.000000],
                    ],
                ],
                [
                    [
                        [493.099304, 499.648926, 1.000000],
                        [579.648926, 413.099304, 1.000000],
                    ],
                    [
                        [1662.861816, 2137.651855, 1.000000],
                        [2137.173828, 1660.380371, 1.000000],
                    ],
                ],
                [
                    [
                        [493.099304, 499.648926, 1.000000],
                        [579.648926, 413.099304, 1.000000],
                    ],
                    [
                        [1660.271240, 2128.229248, 1.000000],
                        [2134.583496, 1650.957764, 1.000000],
                    ],
                ],
            ]
        )
        combinations = product([0, 1], repeat=3)
        for i, combination in enumerate(combinations):
            cameras = self.setUpBatchCameras(combination)
            uv_point_batch = cameras.transform_points(p_3d)
            self.assertClose(uv_point_batch, expected_res[i])
            uv_point_batch = cameras.transform_points(p_3d.repeat(1, 1, 1))
            self.assertClose(uv_point_batch, expected_res[i].repeat(1, 1, 1))

    def test_cuda(self):
        """
        Test cuda device
        """
        focal, principal_point, p_3d = self.setUpSimpleCase()
        cameras_cuda = FishEyeCameras(
            focal_length=focal,
            principal_point=principal_point,
            device="cuda:0",
        )
        uv = cameras_cuda.transform_points(p_3d)
        expected_res = torch.tensor(
            [[493.0993, 499.6489, 1.0], [579.6489, 413.0993, 1.0]],
        )
        self.assertClose(uv, expected_res.to("cuda:0"))

        rep_3d = cameras_cuda.unproject_points(uv)
        self.assertClose(rep_3d, p_3d.to("cuda:0"))

    def test_unproject_shape_broadcasts(self):
        # test case 1:
        # 1 transform with points of (P, 3) -> (P, 3)
        # 1 transform with points of (M, P, 3) -> (M, P, 3)
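        # unproject_points inverts the fisheye projection at the given depth,
        # so unprojecting the output of transform_points recovers the 3d input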
        (
            focal,
            principal_point,
            radial_params,
            tangential_params,
            thin_prism_params,
        ) = self.setUpAriaCase()
        xy_depth = torch.tensor(
            [
                [2134.5814033, 1650.95653328, 1.0],
                [1074.25442904, 1159.52461285, 1.0],
            ]
        )
        cameras = FishEyeCameras(
            focal_length=focal,
            principal_point=principal_point,
            radial_params=radial_params,
            tangential_params=tangential_params,
            thin_prism_params=thin_prism_params,
        )
        rep_3d = cameras.unproject_points(xy_depth)
        expected_res = torch.tensor(
            [
                [[2.999442, 1.990583, 1.000000], [0.666728, 0.833142, 1.000000]],
                [[2.997338, 2.005411, 1.000000], [0.666859, 0.834456, 1.000000]],
                [[3.002090, 1.985229, 1.000000], [0.666537, 0.832025, 1.000000]],
                [[2.999999, 2.000000, 1.000000], [0.666667, 0.833333, 1.000000]],
                [[2.999442, 1.990583, 1.000000], [0.666728, 0.833142, 1.000000]],
                [[2.997338, 2.005411, 1.000000], [0.666859, 0.834456, 1.000000]],
                [[3.002090, 1.985229, 1.000000], [0.666537, 0.832025, 1.000000]],
                [[2.999999, 2.000000, 1.000000], [0.666667, 0.833333, 1.000000]],
            ]
        )
        torch.set_printoptions(precision=6)
        combinations = product([0, 1], repeat=3)
        for i, combination in enumerate(combinations):
            cameras = FishEyeCameras(
                use_radial=combination[0],
                use_tangential=combination[1],
                use_thin_prism=combination[2],
                focal_length=focal,
                principal_point=principal_point,
                radial_params=radial_params,
                tangential_params=tangential_params,
                thin_prism_params=thin_prism_params,
            )
            rep_3d = cameras.unproject_points(xy_depth)
            self.assertClose(rep_3d, expected_res[i])
            rep_3d = cameras.unproject_points(xy_depth.repeat(3, 1, 1))
            self.assertClose(rep_3d, expected_res[i].repeat(3, 1, 1))

            # test case 2:
            # N transforms with points of (P, 3) -> (N, P, 3)
            # N transforms with points of (1, P, 3) -> (N, P, 3)
            cameras = FishEyeCameras(
                use_radial=combination[0],
                use_tangential=combination[1],
                use_thin_prism=combination[2],
                focal_length=focal.repeat(2, 1),
                principal_point=principal_point.repeat(2, 1),
                radial_params=radial_params.repeat(2, 1),
                tangential_params=tangential_params.repeat(2, 1),
                thin_prism_params=thin_prism_params.repeat(2, 1),
            )
            rep_3d = cameras.unproject_points(xy_depth)
            self.assertClose(rep_3d, expected_res[i].repeat(2, 1, 1))

    def test_unhandled_shape(self):
        """
        Test error handling when the shapes of the transforms
        and points are not supported.
        """
        cameras = self.setUpBatchCameras(None)
        points = torch.rand(3, 3, 1)
        with self.assertRaises(ValueError):
            cameras.transform_points(points)

    def test_getitem(self):
        # Check get item returns an instance of the same class
        # with all the same keys
        cam = self.setUpBatchCameras(None)
        c0 = cam[0]
        self.assertTrue(isinstance(c0, FishEyeCameras))
        self.assertEqual(cam.__dict__.keys(), c0.__dict__.keys())