Commit 7788a380 authored by David Novotny, committed by Facebook GitHub Bot

Camera inheritance + unprojections

Summary: Made a CameraBase class. Added `unproject_points` method for each camera class.

Reviewed By: nikhilaravi

Differential Revision: D20373602

fbshipit-source-id: 7e3da5ae420091b5fcab400a9884ef29ad7a7343
parent 365945b1
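
For context, here is a minimal sketch of the projection/unprojection round trip that the new `unproject_points` API supports, mirroring the tests added below. The camera class and parameter values are illustrative; any of the four camera classes touched by this diff behaves the same way.

import torch
from pytorch3d.renderer.cameras import SfMPerspectiveCameras

# Two cameras with default (identity) extrinsics and illustrative focal lengths.
cameras = SfMPerspectiveCameras(focal_length=torch.tensor([1.0, 2.0]))

# Random world points shifted ~4 units along z so they lie in front of the cameras.
xyz = torch.randn(2, 100, 3) * 0.3
xyz[:, :, 2] += 4.0

xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)
xyz_proj = cameras.transform_points(xyz)  # project to screen space
xy_depth = torch.cat((xyz_proj[:, :, :2], xyz_cam[:, :, 2:]), dim=2)

# unproject_points inverts the projection; world_coordinates selects
# world-space vs. camera-space output.
xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=True)
assert torch.allclose(xyz_unproj, xyz, atol=1e-4)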
@@ -9,6 +9,8 @@ from .blending import (
 from .cameras import (
     OpenGLOrthographicCameras,
     OpenGLPerspectiveCameras,
+    SfMOrthographicCameras,
+    SfMPerspectiveCameras,
     camera_position_from_spherical_angles,
     get_world_to_view_transform,
     look_at_rotation,
...
This diff is collapsed.
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+import copy
+import inspect
 import warnings
 from typing import Any, Union
...
@@ -168,10 +170,13 @@ class TensorProperties(object):
         """
         for k in dir(self):
             v = getattr(self, k)
-            if k == "device":
-                setattr(self, k, v)
-            if torch.is_tensor(v):
-                setattr(other, k, v.clone())
+            if inspect.ismethod(v) or k.startswith("__"):
+                continue
+            if torch.is_tensor(v):
+                v_clone = v.clone()
+            else:
+                v_clone = copy.deepcopy(v)
+            setattr(other, k, v_clone)
         return other

     def gather_props(self, batch_idx):
...
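
The change to `clone` above tightens the copy semantics: bound methods and dunder attributes are skipped, tensors are cloned, and everything else is deep-copied, so mutable non-tensor attributes are no longer shared between an object and its clone. A minimal sketch of the resulting behavior; `Props` is a hypothetical stand-in, not a class from this diff:

import copy
import inspect

import torch


class Props:
    """Hypothetical stand-in for a TensorProperties subclass."""

    def __init__(self):
        self.R = torch.eye(3)          # tensor attribute -> cloned
        self.names = ["cam0", "cam1"]  # mutable non-tensor -> deep-copied

    def clone(self):
        other = Props()
        for k in dir(self):
            v = getattr(self, k)
            # skip bound methods (e.g. clone itself) and dunder attributes
            if inspect.ismethod(v) or k.startswith("__"):
                continue
            v_clone = v.clone() if torch.is_tensor(v) else copy.deepcopy(v)
            setattr(other, k, v_clone)
        return other


p = Props()
q = p.clone()
q.R += 1.0                             # in-place edit of the clone's tensor
q.names.append("cam2")                 # in-place edit of the clone's list
assert torch.equal(p.R, torch.eye(3))  # the original tensor is untouched
assert p.names == ["cam0", "cam1"]     # the original list is untouched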
@@ -32,6 +32,7 @@ import numpy as np
 import torch
 from common_testing import TestCaseMixin
 from pytorch3d.renderer.cameras import (
+    CamerasBase,
     OpenGLOrthographicCameras,
     OpenGLPerspectiveCameras,
     SfMOrthographicCameras,
...
@@ -347,6 +348,8 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         RT = get_world_to_view_transform(R=R, T=T)
         self.assertTrue(isinstance(RT, Transform3d))

+
+class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
     def test_view_transform_class_method(self):
         T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
         R = look_at_rotation(T)
...
@@ -377,6 +380,108 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
         self.assertTrue(torch.allclose(C, C_, atol=1e-05))
+
+    @staticmethod
+    def init_random_cameras(cam_type: CamerasBase, batch_size: int):
+        cam_params = {}
+        T = torch.randn(batch_size, 3) * 0.03
+        T[:, 2] = 4
+        R = so3_exponential_map(torch.randn(batch_size, 3) * 3.0)
+        cam_params = {"R": R, "T": T}
+        if cam_type in (OpenGLPerspectiveCameras, OpenGLOrthographicCameras):
+            cam_params["znear"] = torch.rand(batch_size) * 10 + 0.1
+            cam_params["zfar"] = torch.rand(batch_size) * 4 + 1 + cam_params["znear"]
+            if cam_type == OpenGLPerspectiveCameras:
+                cam_params["fov"] = torch.rand(batch_size) * 60 + 30
+                cam_params["aspect_ratio"] = torch.rand(batch_size) * 0.5 + 0.5
+            else:
+                cam_params["top"] = torch.rand(batch_size) * 0.2 + 0.9
+                cam_params["bottom"] = -torch.rand(batch_size) * 0.2 - 0.9
+                cam_params["left"] = -torch.rand(batch_size) * 0.2 - 0.9
+                cam_params["right"] = torch.rand(batch_size) * 0.2 + 0.9
+        elif cam_type in (SfMOrthographicCameras, SfMPerspectiveCameras):
+            cam_params["focal_length"] = torch.rand(batch_size) * 10 + 0.1
+            cam_params["principal_point"] = torch.randn((batch_size, 2))
+        else:
+            raise ValueError(str(cam_type))
+        return cam_type(**cam_params)
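
A note on the helper above: `T[:, 2] = 4` pins the cameras' view-space translation at depth 4, so the random point clouds used in the tests (drawn with standard deviation 0.3 around the origin) project in front of every camera, and `so3_exponential_map` turns random axis-angle vectors into valid rotation matrices.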
+
+    def test_unproject_points(self, batch_size=50, num_points=100):
+        """
+        Checks that an unprojection of a randomly projected point cloud
+        stays the same.
+        """
+        for cam_type in (
+            SfMOrthographicCameras,
+            OpenGLPerspectiveCameras,
+            OpenGLOrthographicCameras,
+            SfMPerspectiveCameras,
+        ):
+            # init the cameras
+            cameras = TestCamerasCommon.init_random_cameras(cam_type, batch_size)
+            # xyz - the ground truth point cloud
+            xyz = torch.randn(batch_size, num_points, 3) * 0.3
+            # xyz in camera coordinates
+            xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)
+            # depth = z-component of xyz_cam
+            depth = xyz_cam[:, :, 2:]
+            # project xyz
+            xyz_proj = cameras.transform_points(xyz)
+            xy, cam_depth = xyz_proj.split(2, dim=2)
+            # input to the unprojection function
+            xy_depth = torch.cat((xy, depth), dim=2)
+            for to_world in (False, True):
+                if to_world:
+                    matching_xyz = xyz
+                else:
+                    matching_xyz = xyz_cam
+                # if we have OpenGL cameras,
+                # test for scaled_depth_input=True/False
+                if cam_type in (OpenGLPerspectiveCameras, OpenGLOrthographicCameras):
+                    for scaled_depth_input in (True, False):
+                        if scaled_depth_input:
+                            xy_depth_ = xyz_proj
+                        else:
+                            xy_depth_ = xy_depth
+                        xyz_unproj = cameras.unproject_points(
+                            xy_depth_,
+                            world_coordinates=to_world,
+                            scaled_depth_input=scaled_depth_input,
+                        )
+                        self.assertTrue(
+                            torch.allclose(xyz_unproj, matching_xyz, atol=1e-4)
+                        )
+                else:
+                    xyz_unproj = cameras.unproject_points(
+                        xy_depth, world_coordinates=to_world
+                    )
+                    self.assertTrue(torch.allclose(xyz_unproj, matching_xyz, atol=1e-4))
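
For the OpenGL cameras the test exercises both depth conventions accepted by `unproject_points`: `scaled_depth_input=True` takes the scaled depth produced by `transform_points`, while `False` takes the raw view-space depth. A condensed, self-contained variant of that branch (default camera parameters stand in for the random ones above):

import torch
from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras

cameras = OpenGLPerspectiveCameras()      # default parameters, batch of one
xyz = torch.randn(1, 100, 3) * 0.3
xyz[:, :, 2] += 4.0                       # keep the points in front of the camera

xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)
xyz_proj = cameras.transform_points(xyz)  # z here is the scaled (post-projection) depth
xy_depth = torch.cat((xyz_proj[:, :, :2], xyz_cam[:, :, 2:]), dim=2)

# The same points are recovered from either depth convention.
a = cameras.unproject_points(xyz_proj, scaled_depth_input=True)
b = cameras.unproject_points(xy_depth, scaled_depth_input=False)
assert torch.allclose(a, b, atol=1e-4)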
+
+    def test_clone(self, batch_size: int = 10):
+        """
+        Checks the clone function of the cameras.
+        """
+        for cam_type in (
+            SfMOrthographicCameras,
+            OpenGLPerspectiveCameras,
+            OpenGLOrthographicCameras,
+            SfMPerspectiveCameras,
+        ):
+            cameras = TestCamerasCommon.init_random_cameras(cam_type, batch_size)
+            cameras = cameras.to(torch.device("cpu"))
+            cameras_clone = cameras.clone()
+            for var in cameras.__dict__.keys():
+                val = getattr(cameras, var)
+                val_clone = getattr(cameras_clone, var)
+                if torch.is_tensor(val):
+                    self.assertClose(val, val_clone)
+                    self.assertSeparate(val, val_clone)
+                else:
+                    self.assertTrue(val == val_clone)
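
`assertClose` and `assertSeparate` come from `common_testing.TestCaseMixin`; together they assert that each cloned tensor has the same values as the original but does not alias its memory. Roughly, and not necessarily the mixin's exact implementation:

import torch

val = torch.randn(3, 3)
val_clone = val.clone()
assert torch.allclose(val, val_clone)          # same values ...
assert val.data_ptr() != val_clone.data_ptr()  # ... but separate storage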
 class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
     def test_perspective(self):
...
@@ -679,4 +784,4 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         vertices = torch.randn([3, 4, 3], dtype=torch.float32)
         v1 = P.transform_points(vertices)
         v2 = sfm_perspective_project_naive(vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5)
-        self.assertClose(v1, v2)
+        self.assertClose(v1, v2, atol=1e-6)