Commit dbf06b50 authored by facebook-github-bot

Initial commit

fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
from pytorch3d.renderer.blending import (
BlendParams,
hard_rgb_blend,
sigmoid_alpha_blend,
softmax_rgb_blend,
)
from pytorch3d.renderer.mesh.rasterizer import Fragments
def sigmoid_blend_naive(colors, fragments, blend_params):
"""
Naive for-loop implementation of distance-based alpha calculation.
For test purposes only.
"""
pix_to_face = fragments.pix_to_face
dists = fragments.dists
sigma = blend_params.sigma
N, H, W, K = pix_to_face.shape
device = pix_to_face.device
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)
for n in range(N):
for h in range(H):
for w in range(W):
alpha = 1.0
# Loop over k faces and calculate 2D distance based probability
# map.
for k in range(K):
if pix_to_face[n, h, w, k] >= 0:
prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
alpha *= 1.0 - prob # cumulative product
pixel_colors[n, h, w, :3] = colors[n, h, w, 0, :]
pixel_colors[n, h, w, 3] = 1.0 - alpha
pixel_colors = torch.clamp(pixel_colors, min=0, max=1.0)
return torch.flip(pixel_colors, [1])
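# A hedged vectorized restatement of the loop above (illustrative only, not
# part of the original tests): over valid faces,
# alpha = 1 - prod_k (1 - sigmoid(-dist_k / sigma)).
def sigmoid_blend_vectorized_sketch(colors, fragments, blend_params):
    mask = (fragments.pix_to_face >= 0).to(colors.dtype)  # (N, H, W, K)
    prob = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask
    alpha = 1.0 - torch.prod(1.0 - prob, dim=-1)  # cumulative product over K
    pixel_colors = torch.ones(
        (*alpha.shape, 4), dtype=colors.dtype, device=colors.device
    )
    pixel_colors[..., :3] = colors[..., 0, :]  # color of the top (k=0) face
    pixel_colors[..., 3] = alpha
    return torch.flip(torch.clamp(pixel_colors, min=0, max=1.0), [1])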
def softmax_blend_naive(colors, fragments, blend_params):
"""
Naive for-loop implementation of softmax blending.
For test purposes only.
"""
pix_to_face = fragments.pix_to_face
dists = fragments.dists
zbuf = fragments.zbuf
sigma = blend_params.sigma
gamma = blend_params.gamma
N, H, W, K = pix_to_face.shape
device = pix_to_face.device
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)
# Near and far clipping planes
zfar = 100.0
znear = 1.0
bk_color = blend_params.background_color
if not torch.is_tensor(bk_color):
bk_color = torch.tensor(bk_color, dtype=colors.dtype, device=device)
# Background color component
delta = np.exp(1e-10 / gamma) * 1e-10
delta = torch.tensor(delta).to(device=device)
for n in range(N):
for h in range(H):
for w in range(W):
alpha = 1.0
weights_k = torch.zeros(K)
zmax = 0.0
# Loop over K to find max z.
for k in range(K):
if pix_to_face[n, h, w, k] >= 0:
zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
if zinv > zmax:
zmax = zinv
# Loop over K faces to calculate 2D distance based probability
# map and zbuf based weights for colors.
for k in range(K):
if pix_to_face[n, h, w, k] >= 0:
zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
alpha *= 1.0 - prob # cumulative product
weights_k[k] = prob * torch.exp((zinv - zmax) / gamma)
denom = weights_k.sum() + delta
weights = weights_k / denom
cols = (weights[..., None] * colors[n, h, w, :, :]).sum(dim=0)
pixel_colors[n, h, w, :3] = cols
pixel_colors[n, h, w, :3] += (delta / denom) * bk_color
pixel_colors[n, h, w, 3] = 1.0 - alpha
pixel_colors = torch.clamp(pixel_colors, min=0, max=1.0)
return torch.flip(pixel_colors, [1])
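# For reference, the loop above implements the SoftRasterizer blend: per pixel,
#   w_k = sigmoid(-dist_k / sigma) * exp((zinv_k - zmax) / gamma)
#   color = sum_k w_k * c_k / (sum_j w_j + delta) + delta * bg / (sum_j w_j + delta)
# where delta = exp(eps / gamma) * eps (eps = 1e-10) weights the background, and
# alpha = 1 - prod_k (1 - sigmoid(-dist_k / sigma)) as in the sigmoid blend.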
class TestBlending(unittest.TestCase):
def setUp(self) -> None:
torch.manual_seed(42)
def test_hard_rgb_blend(self):
N, H, W, K = 5, 10, 10, 20
pix_to_face = torch.ones((N, H, W, K))
bary_coords = torch.ones((N, H, W, K, 3))
fragments = Fragments(
pix_to_face=pix_to_face,
bary_coords=bary_coords,
zbuf=pix_to_face, # dummy
dists=pix_to_face, # dummy
)
colors = bary_coords.clone()
top_k = torch.randn((K, 3))
colors[..., :, :] = top_k
images = hard_rgb_blend(colors, fragments)
expected_vals = torch.ones((N, H, W, 4))
pix_cols = torch.ones_like(expected_vals[..., :3]) * top_k[0, :]
expected_vals[..., :3] = pix_cols
self.assertTrue(torch.allclose(images, expected_vals))
def test_sigmoid_alpha_blend(self):
"""
Test that the outputs of the vectorized sigmoid alpha blend function match
those of the naive iterative version. Also check that the gradients match.
"""
# Create dummy outputs of rasterization simulating a cube in the centre
# of the image with surrounding padded values.
N, S, K = 1, 8, 2
pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
h = int(S / 2)
pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
s = int(S / 4)
e = int(0.75 * S)
pix_to_face[:, s:e, s:e, :] = pix_to_face_full
bary_coords = torch.ones((N, S, S, K, 3))
# randomly flip the sign of the distance
# (-) means inside triangle, (+) means outside triangle.
random_sign_flip = torch.rand((N, S, S, K))
random_sign_flip[random_sign_flip > 0.5] *= -1.0
dists = torch.randn(size=(N, S, S, K))
dists1 = dists * random_sign_flip
dists2 = dists1.clone()
dists1.requires_grad = True
dists2.requires_grad = True
colors = torch.randn_like(bary_coords)
fragments1 = Fragments(
pix_to_face=pix_to_face,
bary_coords=bary_coords, # dummy
zbuf=pix_to_face, # dummy
dists=dists1,
)
fragments2 = Fragments(
pix_to_face=pix_to_face,
bary_coords=bary_coords, # dummy
zbuf=pix_to_face, # dummy
dists=dists2,
)
blend_params = BlendParams(sigma=2e-1)
images = sigmoid_alpha_blend(colors, fragments1, blend_params)
images_naive = sigmoid_blend_naive(colors, fragments2, blend_params)
self.assertTrue(torch.allclose(images, images_naive))
torch.manual_seed(231)
images.sum().backward()
self.assertTrue(hasattr(dists1, "grad"))
images_naive.sum().backward()
self.assertTrue(hasattr(dists2, "grad"))
self.assertTrue(torch.allclose(dists1.grad, dists2.grad, rtol=1e-5))
def test_softmax_rgb_blend(self):
# Create dummy outputs of rasterization simulating a cube in the centre
# of the image with surrounding padded values.
N, S, K = 1, 8, 2
pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64)
h = int(S / 2)
pix_to_face_full = torch.randint(size=(N, h, h, K), low=0, high=100)
s = int(S / 4)
e = int(0.75 * S)
pix_to_face[:, s:e, s:e, :] = pix_to_face_full
bary_coords = torch.ones((N, S, S, K, 3))
random_sign_flip = torch.rand((N, S, S, K))
random_sign_flip[random_sign_flip > 0.5] *= -1.0
zbuf1 = torch.randn(size=(N, S, S, K))
# randomly flip the sign of the distance
# (-) means inside triangle, (+) means outside triangle.
dists1 = torch.randn(size=(N, S, S, K)) * random_sign_flip
dists2 = dists1.clone()
zbuf2 = zbuf1.clone()
dists1.requires_grad = True
dists2.requires_grad = True
zbuf1.requires_grad = True
zbuf2.requires_grad = True
colors = torch.randn_like(bary_coords)
fragments1 = Fragments(
pix_to_face=pix_to_face,
bary_coords=bary_coords, # dummy
zbuf=zbuf1,
dists=dists1,
)
fragments2 = Fragments(
pix_to_face=pix_to_face,
bary_coords=bary_coords, # dummy
zbuf=zbuf2,
dists=dists2,
)
blend_params = BlendParams(sigma=1e-1)
images = softmax_rgb_blend(colors, fragments1, blend_params)
images_naive = softmax_blend_naive(colors, fragments2, blend_params)
self.assertTrue(torch.allclose(images, images_naive))
# Check gradients.
images.sum().backward()
self.assertTrue(hasattr(dists1, "grad"))
self.assertTrue(hasattr(zbuf1, "grad"))
images_naive.sum().backward()
self.assertTrue(hasattr(dists2, "grad"))
self.assertTrue(hasattr(zbuf2, "grad"))
self.assertTrue(torch.allclose(dists1.grad, dists2.grad, atol=2e-5))
self.assertTrue(torch.allclose(zbuf1.grad, zbuf2.grad, atol=2e-5))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Some of the code below is adapted from Soft Rasterizer (SoftRas)
#
# Copyright (c) 2017 Hiroharu Kato
# Copyright (c) 2018 Nikos Kolotouros
# Copyright (c) 2019 Shichen Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import numpy as np
import unittest
import torch
from pytorch3d.renderer.cameras import (
OpenGLOrthographicCameras,
OpenGLPerspectiveCameras,
SfMOrthographicCameras,
SfMPerspectiveCameras,
camera_position_from_spherical_angles,
get_world_to_view_transform,
look_at_rotation,
)
from pytorch3d.transforms import Transform3d
from pytorch3d.transforms.so3 import so3_exponential_map
from common_testing import TestCaseMixin
# Naive function adapted from SoftRasterizer for test purposes.
def perspective_project_naive(points, fov=60.0):
"""
Compute perspective projection from a given viewing angle.
Args:
points: (N, V, 3) representing the padded points.
fov: viewing angle in degrees.
Returns:
(N, V, 3) tensor of projected points preserving the view space z
coordinate (no z renormalization)
"""
device = points.device
halfFov = torch.tensor(
(fov / 2) / 180 * np.pi, dtype=torch.float32, device=device
)
scale = torch.tan(halfFov[None])
scale = scale[:, None]
z = points[:, :, 2]
x = points[:, :, 0] / z / scale
y = points[:, :, 1] / z / scale
points = torch.stack((x, y, z), dim=2)
return points
def sfm_perspective_project_naive(points, fx=1.0, fy=1.0, p0x=0.0, p0y=0.0):
"""
Compute perspective projection using focal length and principal point.
Args:
points: (N, V, 3) representing the padded points.
fx: focal length x-component, in world units.
fy: focal length y-component, in world units.
p0x: principal point x-coordinate, in pixels.
p0y: principal point y-coordinate, in pixels.
Returns:
(N, V, 3) tensor of projected points.
"""
z = points[:, :, 2]
x = (points[:, :, 0] * fx + p0x) / z
y = (points[:, :, 1] * fy + p0y) / z
points = torch.stack((x, y, 1.0 / z), dim=2)
return points
# Naive function adapted from SoftRasterizer for test purposes.
def orthographic_project_naive(points, scale_xyz=(1.0, 1.0, 1.0)):
"""
Compute orthographic projection with per-axis scale factors.
Args:
points: (N, V, 3) representing the padded points.
scale_xyz: (N, 3) scaling factors for each of the xyz directions.
Returns:
(N, V, 3) tensor of projected points preserving the view space z
coordinate (no z renormalization).
"""
if not torch.is_tensor(scale_xyz):
scale_xyz = torch.tensor(scale_xyz)
scale_xyz = scale_xyz.view(-1, 3)
z = points[:, :, 2]
x = points[:, :, 0] * scale_xyz[:, 0]
y = points[:, :, 1] * scale_xyz[:, 1]
points = torch.stack((x, y, z), dim=2)
return points
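# Minimal usage sketch for the naive projectors (illustrative; shapes follow
# the docstrings above):
#   pts = torch.tensor([[[1.0, 2.0, 10.0]]])            # (N=1, V=1, 3)
#   perspective_project_naive(pts, fov=60.0)            # x, y scaled by 1 / (z * tan(fov / 2))
#   sfm_perspective_project_naive(pts, fx=2.0, fy=2.0)  # (fx * x + p0x) / z, depth stored as 1 / z
#   orthographic_project_naive(pts, (2.0, 0.5, 1.0))    # per-axis scaling, z unchanged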
class TestCameraHelpers(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
np.random.seed(42)
def test_camera_position_from_angles_python_scalar(self):
dist = 2.7
elev = 90.0
azim = 0.0
expected_position = torch.tensor(
[0.0, 2.7, 0.0], dtype=torch.float32
).view(1, 3)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
def test_camera_position_from_angles_python_scalar_radians(self):
dist = 2.7
elev = math.pi / 2
azim = 0.0
expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32)
expected_position = expected_position.view(1, 3)
position = camera_position_from_spherical_angles(
dist, elev, azim, degrees=False
)
self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
def test_camera_position_from_angles_torch_scalars(self):
dist = torch.tensor(2.7)
elev = torch.tensor(0.0)
azim = torch.tensor(90.0)
expected_position = torch.tensor(
[2.7, 0.0, 0.0], dtype=torch.float32
).view(1, 3)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
def test_camera_position_from_angles_mixed_scalars(self):
dist = 2.7
elev = torch.tensor(0.0)
azim = 90.0
expected_position = torch.tensor(
[2.7, 0.0, 0.0], dtype=torch.float32
).view(1, 3)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
def test_camera_position_from_angles_torch_scalar_grads(self):
dist = torch.tensor(2.7, requires_grad=True)
elev = torch.tensor(45.0, requires_grad=True)
azim = torch.tensor(45.0)
position = camera_position_from_spherical_angles(dist, elev, azim)
position.sum().backward()
self.assertTrue(hasattr(elev, "grad"))
self.assertTrue(hasattr(dist, "grad"))
elev_grad = elev.grad.clone()
dist_grad = dist.grad.clone()
elev = math.pi / 180.0 * elev.detach()
azim = math.pi / 180.0 * azim
grad_dist = (
torch.cos(elev) * torch.sin(azim)
+ torch.sin(elev)
- torch.cos(elev) * torch.cos(azim)
)
grad_elev = (
-torch.sin(elev) * torch.sin(azim)
+ torch.cos(elev)
+ torch.sin(elev) * torch.cos(azim)
)
grad_elev = dist * (math.pi / 180.0) * grad_elev
self.assertTrue(torch.allclose(elev_grad, grad_elev))
self.assertTrue(torch.allclose(dist_grad, grad_dist))
def test_camera_position_from_angles_vectors(self):
dist = torch.tensor([2.0, 2.0])
elev = torch.tensor([0.0, 90.0])
azim = torch.tensor([90.0, 0.0])
expected_position = torch.tensor(
[[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=torch.float32
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
def test_camera_position_from_angles_vectors_broadcast(self):
dist = torch.tensor([2.0, 3.0, 5.0])
elev = torch.tensor([0.0])
azim = torch.tensor([90.0])
expected_position = torch.tensor(
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
dtype=torch.float32,
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=3e-7))
def test_camera_position_from_angles_vectors_mixed_broadcast(self):
dist = torch.tensor([2.0, 3.0, 5.0])
elev = 0.0
azim = torch.tensor(90.0)
expected_position = torch.tensor(
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
dtype=torch.float32,
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertTrue(torch.allclose(position, expected_position, atol=3e-7))
def test_camera_position_from_angles_vectors_mixed_broadcast_grads(self):
dist = torch.tensor([2.0, 3.0, 5.0], requires_grad=True)
elev = torch.tensor(45.0, requires_grad=True)
azim = 45.0
position = camera_position_from_spherical_angles(dist, elev, azim)
position.sum().backward()
self.assertTrue(hasattr(elev, "grad"))
self.assertTrue(hasattr(dist, "grad"))
elev_grad = elev.grad.clone()
dist_grad = dist.grad.clone()
azim = torch.tensor(azim)
elev = math.pi / 180.0 * elev.detach()
azim = math.pi / 180.0 * azim
grad_dist = (
torch.cos(elev) * torch.sin(azim)
+ torch.sin(elev)
- torch.cos(elev) * torch.cos(azim)
)
grad_elev = (
-torch.sin(elev) * torch.sin(azim)
+ torch.cos(elev)
+ torch.sin(elev) * torch.cos(azim)
)
grad_elev = (dist * (math.pi / 180.0) * grad_elev).sum()
self.assertTrue(torch.allclose(elev_grad, grad_elev))
self.assertTrue(torch.allclose(dist_grad, grad_dist))
def test_camera_position_from_angles_vectors_bad_broadcast(self):
# Batch dim for broadcast must be N or 1
dist = torch.tensor([2.0, 3.0, 5.0])
elev = torch.tensor([0.0, 90.0])
azim = torch.tensor([90.0])
with self.assertRaises(ValueError):
camera_position_from_spherical_angles(dist, elev, azim)
def test_look_at_rotation_python_list(self):
camera_position = [[0.0, 0.0, -1.0]] # camera pointing along negative z
rot_mat = look_at_rotation(camera_position)
self.assertTrue(torch.allclose(rot_mat, torch.eye(3)[None], atol=2e-7))
def test_look_at_rotation_input_fail(self):
camera_position = [-1.0] # expected to have xyz positions
with self.assertRaises(ValueError):
look_at_rotation(camera_position)
def test_look_at_rotation_list_broadcast(self):
# fmt: off
camera_positions = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
rot_mats_expected = torch.tensor(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
],
[
[-1.0, 0.0, 0.0], # noqa: E241, E201
[ 0.0, 1.0, 0.0], # noqa: E241, E201
[ 0.0, 0.0, -1.0] # noqa: E241, E201
],
],
dtype=torch.float32
)
# fmt: on
rot_mats = look_at_rotation(camera_positions)
self.assertTrue(torch.allclose(rot_mats, rot_mats_expected, atol=2e-7))
def test_look_at_rotation_tensor_broadcast(self):
# fmt: off
camera_positions = torch.tensor([
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0] # noqa: E241, E201
], dtype=torch.float32)
rot_mats_expected = torch.tensor(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
],
[
[-1.0, 0.0, 0.0], # noqa: E241, E201
[ 0.0, 1.0, 0.0], # noqa: E241, E201
[ 0.0, 0.0, -1.0] # noqa: E241, E201
],
],
dtype=torch.float32
)
# fmt: on
rot_mats = look_at_rotation(camera_positions)
self.assertTrue(torch.allclose(rot_mats, rot_mats_expected, atol=2e-7))
def test_look_at_rotation_tensor_grad(self):
camera_position = torch.tensor([[0.0, 0.0, -1.0]], requires_grad=True)
rot_mat = look_at_rotation(camera_position)
rot_mat.sum().backward()
self.assertTrue(hasattr(camera_position, "grad"))
self.assertTrue(
torch.allclose(
camera_position.grad,
torch.zeros_like(camera_position),
atol=2e-7,
)
)
def test_view_transform(self):
T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
R = look_at_rotation(T)
RT = get_world_to_view_transform(R=R, T=T)
self.assertTrue(isinstance(RT, Transform3d))
def test_view_transform_class_method(self):
T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
R = look_at_rotation(T)
RT = get_world_to_view_transform(R=R, T=T)
for cam_type in (
OpenGLPerspectiveCameras,
OpenGLOrthographicCameras,
SfMOrthographicCameras,
SfMPerspectiveCameras,
):
cam = cam_type(R=R, T=T)
RT_class = cam.get_world_to_view_transform()
self.assertTrue(
torch.allclose(RT.get_matrix(), RT_class.get_matrix())
)
self.assertTrue(isinstance(RT, Transform3d))
def test_get_camera_center(self, batch_size=10):
T = torch.randn(batch_size, 3)
R = so3_exponential_map(torch.randn(batch_size, 3) * 3.0)
for cam_type in (
OpenGLPerspectiveCameras,
OpenGLOrthographicCameras,
SfMOrthographicCameras,
SfMPerspectiveCameras,
):
cam = cam_type(R=R, T=T)
C = cam.get_camera_center()
C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
self.assertTrue(torch.allclose(C, C_, atol=1e-05))
class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
def test_perspective(self):
far = 10.0
near = 1.0
cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=60.0)
P = cameras.get_projection_transform()
# vertices are at the far clipping plane so z gets mapped to 1.
vertices = torch.tensor([1, 2, far], dtype=torch.float32)
projected_verts = torch.tensor(
[np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
v2 = perspective_project_naive(vertices, fov=60.0)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(far * v1[..., 2], v2[..., 2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
# vertices are at the near clipping plane so z gets mapped to 0.0.
vertices[..., 2] = near
projected_verts = torch.tensor(
[np.sqrt(3) / near, 2 * np.sqrt(3) / near, 0.0], dtype=torch.float32
)
v1 = P.transform_points(vertices)
v2 = perspective_project_naive(vertices, fov=60.0)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_perspective_kwargs(self):
cameras = OpenGLPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)
# Override defaults by passing in values to get_projection_transform
far = 10.0
P = cameras.get_projection_transform(znear=1.0, zfar=far, fov=60.0)
vertices = torch.tensor([1, 2, far], dtype=torch.float32)
projected_verts = torch.tensor(
[np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_perspective_mixed_inputs_broadcast(self):
far = torch.tensor([10.0, 20.0], dtype=torch.float32)
near = 1.0
fov = torch.tensor(60.0)
cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
P = cameras.get_projection_transform()
vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
z1 = 1.0 # vertices at far clipping plane so z = 1.0
z2 = (20.0 / (20.0 - 1.0) * 10.0 + -(20.0) / (20.0 - 1.0)) / 10.0
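# The projection maps depth as ndc_z = zfar * (z - znear) / ((zfar - znear) * z),
# which is 0 at znear and 1 at zfar; z2 evaluates this for znear=1, zfar=20, z=10.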
projected_verts = torch.tensor(
[
[np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z1],
[np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z2],
],
dtype=torch.float32,
)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
v2 = perspective_project_naive(vertices, fov=60.0)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_perspective_mixed_inputs_grad(self):
far = torch.tensor([10.0])
near = 1.0
fov = torch.tensor(60.0, requires_grad=True)
cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
P = cameras.get_projection_transform()
vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
vertices_batch = vertices[None, None, :]
v1 = P.transform_points(vertices_batch).squeeze()
v1.sum().backward()
self.assertTrue(hasattr(fov, "grad"))
fov_grad = fov.grad.clone()
half_fov_rad = (math.pi / 180.0) * fov.detach() / 2.0
grad_cotan = -(1.0 / (torch.sin(half_fov_rad) ** 2.0) * 1 / 2.0)
grad_fov = (math.pi / 180.0) * grad_cotan
grad_fov = (vertices[0] + vertices[1]) * grad_fov / 10.0
self.assertTrue(torch.allclose(fov_grad, grad_fov))
def test_camera_class_init(self):
device = torch.device("cuda:0")
cam = OpenGLPerspectiveCameras(znear=10.0, zfar=(100.0, 200.0))
# Check broadcasting
self.assertTrue(cam.znear.shape == (2,))
self.assertTrue(cam.zfar.shape == (2,))
# update znear element 1
cam[1].znear = 20.0
self.assertTrue(cam.znear[1] == 20.0)
# Get item and get value
c0 = cam[0]
self.assertTrue(c0.zfar == 100.0)
# Test to
new_cam = cam.to(device=device)
self.assertTrue(new_cam.device == device)
def test_get_full_transform(self):
cam = OpenGLPerspectiveCameras()
T = torch.tensor([0.0, 0.0, 1.0]).view(1, -1)
R = look_at_rotation(T)
P = cam.get_full_projection_transform(R=R, T=T)
self.assertTrue(isinstance(P, Transform3d))
self.assertTrue(torch.allclose(cam.R, R))
self.assertTrue(torch.allclose(cam.T, T))
def test_transform_points(self):
# Check transform_points methods works with default settings for
# RT and P
far = 10.0
cam = OpenGLPerspectiveCameras(znear=1.0, zfar=far, fov=60.0)
points = torch.tensor([1, 2, far], dtype=torch.float32)
points = points.view(1, 1, 3).expand(5, 10, -1)
projected_points = torch.tensor(
[np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
)
projected_points = projected_points.view(1, 1, 3).expand(5, 10, -1)
new_points = cam.transform_points(points)
self.assertTrue(torch.allclose(new_points, projected_points))
class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
def test_orthographic(self):
far = 10.0
near = 1.0
cameras = OpenGLOrthographicCameras(znear=near, zfar=far)
P = cameras.get_projection_transform()
vertices = torch.tensor([1, 2, far], dtype=torch.float32)
projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
vertices[..., 2] = near
projected_verts[2] = 0.0
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_orthographic_scaled(self):
vertices = torch.tensor([1, 2, 0.5], dtype=torch.float32)
vertices = vertices[None, None, :]
scale = torch.tensor([[2.0, 0.5, 20]])
# applying the scale puts the z coordinate at the far clipping plane
# so the z is mapped to 1.0
projected_verts = torch.tensor([2, 1, 1], dtype=torch.float32)
cameras = OpenGLOrthographicCameras(
znear=1.0, zfar=10.0, scale_xyz=scale
)
P = cameras.get_projection_transform()
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices, scale)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1, projected_verts))
def test_orthographic_kwargs(self):
cameras = OpenGLOrthographicCameras(znear=5.0, zfar=100.0)
far = 10.0
P = cameras.get_projection_transform(znear=1.0, zfar=far)
vertices = torch.tensor([1, 2, far], dtype=torch.float32)
projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_orthographic_mixed_inputs_broadcast(self):
far = torch.tensor([10.0, 20.0])
near = 1.0
cameras = OpenGLOrthographicCameras(znear=near, zfar=far)
P = cameras.get_projection_transform()
vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
z2 = 1.0 / (20.0 - 1.0) * 10.0 + -(1.0) / (20.0 - 1.0)
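# Orthographic depth is affine: ndc_z = (z - znear) / (zfar - znear); z2
# evaluates this for znear=1, zfar=20, z=10.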
projected_verts = torch.tensor(
[[1.0, 2.0, 1.0], [1.0, 2.0, z2]], dtype=torch.float32
)
vertices = vertices[None, None, :]
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
def test_orthographic_mixed_inputs_grad(self):
far = torch.tensor([10.0])
near = 1.0
scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
cameras = OpenGLOrthographicCameras(
znear=near, zfar=far, scale_xyz=scale
)
P = cameras.get_projection_transform()
vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
vertices_batch = vertices[None, None, :]
v1 = P.transform_points(vertices_batch)
v1.sum().backward()
self.assertTrue(hasattr(scale, "grad"))
scale_grad = scale.grad.clone()
grad_scale = torch.tensor(
[
[
vertices[0] * P._matrix[:, 0, 0],
vertices[1] * P._matrix[:, 1, 1],
vertices[2] * P._matrix[:, 2, 2],
]
]
)
self.assertTrue(torch.allclose(scale_grad, grad_scale))
class TestSfMOrthographicProjection(TestCaseMixin, unittest.TestCase):
def test_orthographic(self):
cameras = SfMOrthographicCameras()
P = cameras.get_projection_transform()
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
projected_verts = vertices.clone()
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1, projected_verts))
def test_orthographic_scaled(self):
focal_length_x = 10.0
focal_length_y = 15.0
cameras = SfMOrthographicCameras(
focal_length=((focal_length_x, focal_length_y),)
)
P = cameras.get_projection_transform()
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
projected_verts = vertices.clone()
projected_verts[:, :, 0] *= focal_length_x
projected_verts[:, :, 1] *= focal_length_y
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(
vertices, scale_xyz=(focal_length_x, focal_length_y, 1.0)
)
v3 = cameras.transform_points(vertices)
self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v3[..., :2], v2[..., :2]))
self.assertTrue(torch.allclose(v1, projected_verts))
def test_orthographic_kwargs(self):
cameras = SfMOrthographicCameras(
focal_length=5.0, principal_point=((2.5, 2.5),)
)
P = cameras.get_projection_transform(
focal_length=2.0, principal_point=((2.5, 3.5),)
)
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
projected_verts = vertices.clone()
projected_verts[:, :, :2] *= 2.0
projected_verts[:, :, 0] += 2.5
projected_verts[:, :, 1] += 3.5
v1 = P.transform_points(vertices)
self.assertTrue(torch.allclose(v1, projected_verts))
class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
def test_perspective(self):
cameras = SfMPerspectiveCameras()
P = cameras.get_projection_transform()
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
v1 = P.transform_points(vertices)
v2 = sfm_perspective_project_naive(vertices)
self.assertTrue(torch.allclose(v1, v2))
def test_perspective_scaled(self):
focal_length_x = 10.0
focal_length_y = 15.0
p0x = 15.0
p0y = 30.0
cameras = SfMPerspectiveCameras(
focal_length=((focal_length_x, focal_length_y),),
principal_point=((p0x, p0y),),
)
P = cameras.get_projection_transform()
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
v1 = P.transform_points(vertices)
v2 = sfm_perspective_project_naive(
vertices, fx=focal_length_x, fy=focal_length_y, p0x=p0x, p0y=p0y
)
v3 = cameras.transform_points(vertices)
self.assertTrue(torch.allclose(v1, v2))
self.assertTrue(torch.allclose(v3[..., :2], v2[..., :2]))
def test_perspective_kwargs(self):
cameras = SfMPerspectiveCameras(
focal_length=5.0, principal_point=((2.5, 2.5),)
)
P = cameras.get_projection_transform(
focal_length=2.0, principal_point=((2.5, 3.5),)
)
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
v1 = P.transform_points(vertices)
v2 = sfm_perspective_project_naive(
vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5
)
self.assertTrue(torch.allclose(v1, v2))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn.functional as F
from pytorch3d.loss import chamfer_distance
class TestChamfer(unittest.TestCase):
@staticmethod
def init_pointclouds(batch_size: int = 10, P1: int = 32, P2: int = 64):
"""
Randomly initialize two batches of point clouds of sizes
(N, P1, D) and (N, P2, D) and return random normal vectors for
each batch of size (N, P1, 3) and (N, P2, 3).
"""
device = torch.device("cuda:0")
p1 = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device)
p1_normals = torch.rand(
(batch_size, P1, 3), dtype=torch.float32, device=device
)
p1_normals = p1_normals / p1_normals.norm(dim=2, p=2, keepdim=True)
p2 = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device)
p2_normals = torch.rand(
(batch_size, P2, 3), dtype=torch.float32, device=device
)
p2_normals = p2_normals / p2_normals.norm(dim=2, p=2, keepdim=True)
weights = torch.rand((batch_size,), dtype=torch.float32, device=device)
return p1, p2, p1_normals, p2_normals, weights
@staticmethod
def chamfer_distance_naive(p1, p2, p1_normals=None, p2_normals=None):
"""
Naive iterative implementation of nearest neighbor and chamfer distance.
Returns lists of the unreduced loss and loss_normals.
"""
N, P1, D = p1.shape
P2 = p2.size(1)
device = torch.device("cuda:0")
return_normals = p1_normals is not None and p2_normals is not None
dist = torch.zeros((N, P1, P2), dtype=torch.float32, device=device)
for n in range(N):
for i1 in range(P1):
for i2 in range(P2):
dist[n, i1, i2] = torch.sum(
(p1[n, i1, :] - p2[n, i2, :]) ** 2
)
loss = [
torch.min(dist, dim=2)[0], # (N, P1)
torch.min(dist, dim=1)[0], # (N, P2)
]
lnorm = [p1.new_zeros(()), p1.new_zeros(())]
if return_normals:
p1_index = dist.argmin(2).view(N, P1, 1).expand(N, P1, 3)
p2_index = dist.argmin(1).view(N, P2, 1).expand(N, P2, 3)
lnorm1 = 1 - torch.abs(
F.cosine_similarity(
p1_normals, p2_normals.gather(1, p1_index), dim=2, eps=1e-6
)
)
lnorm2 = 1 - torch.abs(
F.cosine_similarity(
p2_normals, p1_normals.gather(1, p2_index), dim=2, eps=1e-6
)
)
lnorm = [lnorm1, lnorm2] # [(N, P1), (N, P2)]
return loss, lnorm
def test_chamfer_default_no_normals(self):
"""
Compare chamfer loss with naive implementation using default
input values and no normals.
"""
N, P1, P2 = 7, 10, 18
p1, p2, _, _, weights = TestChamfer.init_pointclouds(N, P1, P2)
pred_loss, _ = TestChamfer.chamfer_distance_naive(p1, p2)
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
pred_loss = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss *= weights
pred_loss = pred_loss.sum() / weights.sum()
self.assertTrue(torch.allclose(loss, pred_loss))
self.assertTrue(loss_norm is None)
def test_chamfer_point_reduction(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for point_reduction in ["mean", "sum", "none"] and
batch_reduction = "none".
"""
N, P1, P2 = 7, 10, 18
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
N, P1, P2
)
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, p1_normals, p2_normals
)
# point_reduction = "mean".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="none",
point_reduction="mean",
)
pred_loss_mean = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss_mean *= weights
self.assertTrue(torch.allclose(loss, pred_loss_mean))
pred_loss_norm_mean = (
pred_loss_norm[0].sum(1) / P1 + pred_loss_norm[1].sum(1) / P2
)
pred_loss_norm_mean *= weights
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_mean))
# point_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="none",
point_reduction="sum",
)
pred_loss_sum = pred_loss[0].sum(1) + pred_loss[1].sum(1)
pred_loss_sum *= weights
self.assertTrue(torch.allclose(loss, pred_loss_sum))
pred_loss_norm_sum = pred_loss_norm[0].sum(1) + pred_loss_norm[1].sum(1)
pred_loss_norm_sum *= weights
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_sum))
# Error when point_reduction = "none" and batch_reduction = "none".
with self.assertRaises(ValueError):
chamfer_distance(
p1,
p2,
weights=weights,
batch_reduction="none",
point_reduction="none",
)
# Error when batch_reduction is not in ["none", "mean", "sum"].
with self.assertRaises(ValueError):
chamfer_distance(p1, p2, weights=weights, batch_reduction="max")
def test_chamfer_batch_reduction(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for batch_reduction in ["mean", "sum"] and point_reduction = "none".
"""
N, P1, P2 = 7, 10, 18
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
N, P1, P2
)
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, p1_normals, p2_normals
)
# batch_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="sum",
point_reduction="none",
)
pred_loss[0] *= weights.view(N, 1)
pred_loss[1] *= weights.view(N, 1)
pred_loss = pred_loss[0].sum() + pred_loss[1].sum()
self.assertTrue(torch.allclose(loss, pred_loss))
pred_loss_norm[0] *= weights.view(N, 1)
pred_loss_norm[1] *= weights.view(N, 1)
pred_loss_norm = pred_loss_norm[0].sum() + pred_loss_norm[1].sum()
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm))
# batch_reduction = "mean".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="mean",
point_reduction="none",
)
pred_loss /= weights.sum()
self.assertTrue(torch.allclose(loss, pred_loss))
pred_loss_norm /= weights.sum()
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm))
# Error when point_reduction is not in ["none", "mean", "sum"].
with self.assertRaises(ValueError):
chamfer_distance(p1, p2, weights=weights, point_reduction="max")
def test_chamfer_joint_reduction(self):
"""
Compare output of vectorized chamfer loss with naive implementation
for batch_reduction in ["mean", "sum"] and
point_reduction in ["mean", "sum"].
"""
N, P1, P2 = 7, 10, 18
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
N, P1, P2
)
pred_loss, pred_loss_norm = TestChamfer.chamfer_distance_naive(
p1, p2, p1_normals, p2_normals
)
# batch_reduction = "sum", point_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="sum",
point_reduction="sum",
)
pred_loss[0] *= weights.view(N, 1)
pred_loss[1] *= weights.view(N, 1)
pred_loss_sum = pred_loss[0].sum(1) + pred_loss[1].sum(1) # point sum
pred_loss_sum = pred_loss_sum.sum() # batch sum
self.assertTrue(torch.allclose(loss, pred_loss_sum))
pred_loss_norm[0] *= weights.view(N, 1)
pred_loss_norm[1] *= weights.view(N, 1)
pred_loss_norm_sum = pred_loss_norm[0].sum(1) + pred_loss_norm[1].sum(
1
) # point sum.
pred_loss_norm_sum = pred_loss_norm_sum.sum() # batch sum
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_sum))
# batch_reduction = "mean", point_reduction = "sum".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="mean",
point_reduction="sum",
)
pred_loss_sum /= weights.sum()
self.assertTrue(torch.allclose(loss, pred_loss_sum))
pred_loss_norm_sum /= weights.sum()
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_sum))
# batch_reduction = "sum", point_reduction = "mean".
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="sum",
point_reduction="mean",
)
pred_loss_mean = pred_loss[0].sum(1) / P1 + pred_loss[1].sum(1) / P2
pred_loss_mean = pred_loss_mean.sum()
self.assertTrue(torch.allclose(loss, pred_loss_mean))
pred_loss_norm_mean = (
pred_loss_norm[0].sum(1) / P1 + pred_loss_norm[1].sum(1) / P2
)
pred_loss_norm_mean = pred_loss_norm_mean.sum()
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_mean))
# batch_reduction = "mean", point_reduction = "mean". This is the default.
loss, loss_norm = chamfer_distance(
p1,
p2,
p1_normals,
p2_normals,
weights=weights,
batch_reduction="mean",
point_reduction="mean",
)
pred_loss_mean /= weights.sum()
self.assertTrue(torch.allclose(loss, pred_loss_mean))
pred_loss_norm_mean /= weights.sum()
self.assertTrue(torch.allclose(loss_norm, pred_loss_norm_mean))
def test_incorrect_weights(self):
N, P1, P2 = 16, 64, 128
device = torch.device("cuda:0")
p1 = torch.rand(
(N, P1, 3), dtype=torch.float32, device=device, requires_grad=True
)
p2 = torch.rand(
(N, P2, 3), dtype=torch.float32, device=device, requires_grad=True
)
weights = torch.zeros((N,), dtype=torch.float32, device=device)
loss, loss_norm = chamfer_distance(
p1, p2, weights=weights, batch_reduction="mean"
)
self.assertTrue(torch.allclose(loss.cpu(), torch.zeros((1,))))
self.assertTrue(loss.requires_grad)
self.assertTrue(torch.allclose(loss_norm.cpu(), torch.zeros((1,))))
self.assertTrue(loss_norm.requires_grad)
loss, loss_norm = chamfer_distance(
p1, p2, weights=weights, batch_reduction="none"
)
self.assertTrue(torch.allclose(loss.cpu(), torch.zeros((N,))))
self.assertTrue(loss.requires_grad)
self.assertTrue(torch.allclose(loss_norm.cpu(), torch.zeros((N,))))
self.assertTrue(loss_norm.requires_grad)
weights = torch.ones((N,), dtype=torch.float32, device=device) * -1
with self.assertRaises(ValueError):
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
weights = torch.zeros((N - 1,), dtype=torch.float32, device=device)
with self.assertRaises(ValueError):
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
@staticmethod
def chamfer_with_init(
batch_size: int, P1: int, P2: int, return_normals: bool
):
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
batch_size, P1, P2
)
torch.cuda.synchronize()
def loss():
loss, loss_normals = chamfer_distance(
p1, p2, p1_normals, p2_normals, weights=weights
)
torch.cuda.synchronize()
return loss
@staticmethod
def chamfer_naive_with_init(
batch_size: int, P1: int, P2: int, return_normals: bool
):
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
batch_size, P1, P2
)
torch.cuda.synchronize()
def loss():
loss, loss_normals = TestChamfer.chamfer_distance_naive(
p1, p2, p1_normals, p2_normals
)
torch.cuda.synchronize()
return loss
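# A hedged vectorized sketch of the nearest-neighbor step in
# chamfer_distance_naive (illustrative only, not the library implementation):
# torch.cdist yields all pairwise distances, so both directional terms reduce
# to a min over one axis of the (N, P1, P2) distance matrix.
def _chamfer_nn_sketch(p1, p2):
    dist = torch.cdist(p1, p2) ** 2  # (N, P1, P2) squared Euclidean distances
    loss_p1 = dist.min(dim=2)[0]  # (N, P1): distance to nearest point in p2
    loss_p2 = dist.min(dim=1)[0]  # (N, P2): distance to nearest point in p1
    return loss_p1, loss_p2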
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.ops import cubify
class TestCubify(unittest.TestCase):
def test_allempty(self):
N, V = 32, 14
device = torch.device("cuda:0")
voxels = torch.zeros((N, V, V, V), dtype=torch.float32, device=device)
meshes = cubify(voxels, 0.5, 0)
self.assertTrue(meshes.isempty())
def test_cubify(self):
N, V = 4, 2
device = torch.device("cuda:0")
voxels = torch.zeros((N, V, V, V), dtype=torch.float32, device=device)
# 1st example: (top left corner, znear) is on
voxels[0, 0, 0, 0] = 1.0
# 2nd example: all are on
voxels[1] = 1.0
# 3rd example: empty
# 4th example
voxels[3, :, :, 1] = 1.0
voxels[3, 1, 1, 0] = 1.0
# compute cubify
meshes = cubify(voxels, 0.5, 0)
# 1st-check
verts, faces = meshes.get_mesh_verts_faces(0)
self.assertTrue(
torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
)
self.assertTrue(
torch.allclose(
verts,
torch.tensor(
[
[-1.0, -1.0, -1.0],
[-1.0, -1.0, 1.0],
[1.0, -1.0, -1.0],
[1.0, -1.0, 1.0],
[-1.0, 1.0, -1.0],
[-1.0, 1.0, 1.0],
[1.0, 1.0, -1.0],
[1.0, 1.0, 1.0],
],
dtype=torch.float32,
device=device,
),
)
)
self.assertTrue(
torch.allclose(
faces,
torch.tensor(
[
[0, 1, 4],
[1, 5, 4],
[4, 5, 6],
[5, 7, 6],
[0, 4, 6],
[0, 6, 2],
[0, 3, 1],
[0, 2, 3],
[6, 7, 3],
[6, 3, 2],
[1, 7, 5],
[1, 3, 7],
],
dtype=torch.int64,
device=device,
),
)
)
# 2nd-check
verts, faces = meshes.get_mesh_verts_faces(1)
self.assertTrue(
torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
)
self.assertTrue(
torch.allclose(
verts,
torch.tensor(
[
[-1.0, -1.0, -1.0],
[-1.0, -1.0, 1.0],
[-1.0, -1.0, 3.0],
[1.0, -1.0, -1.0],
[1.0, -1.0, 1.0],
[1.0, -1.0, 3.0],
[3.0, -1.0, -1.0],
[3.0, -1.0, 1.0],
[3.0, -1.0, 3.0],
[-1.0, 1.0, -1.0],
[-1.0, 1.0, 1.0],
[-1.0, 1.0, 3.0],
[1.0, 1.0, -1.0],
[1.0, 1.0, 3.0],
[3.0, 1.0, -1.0],
[3.0, 1.0, 1.0],
[3.0, 1.0, 3.0],
[-1.0, 3.0, -1.0],
[-1.0, 3.0, 1.0],
[-1.0, 3.0, 3.0],
[1.0, 3.0, -1.0],
[1.0, 3.0, 1.0],
[1.0, 3.0, 3.0],
[3.0, 3.0, -1.0],
[3.0, 3.0, 1.0],
[3.0, 3.0, 3.0],
],
dtype=torch.float32,
device=device,
),
)
)
self.assertTrue(
torch.allclose(
faces,
torch.tensor(
[
[0, 1, 9],
[1, 10, 9],
[0, 9, 12],
[0, 12, 3],
[0, 4, 1],
[0, 3, 4],
[1, 2, 10],
[2, 11, 10],
[1, 5, 2],
[1, 4, 5],
[2, 13, 11],
[2, 5, 13],
[3, 12, 14],
[3, 14, 6],
[3, 7, 4],
[3, 6, 7],
[14, 15, 7],
[14, 7, 6],
[4, 8, 5],
[4, 7, 8],
[15, 16, 8],
[15, 8, 7],
[5, 16, 13],
[5, 8, 16],
[9, 10, 17],
[10, 18, 17],
[17, 18, 20],
[18, 21, 20],
[9, 17, 20],
[9, 20, 12],
[10, 11, 18],
[11, 19, 18],
[18, 19, 21],
[19, 22, 21],
[11, 22, 19],
[11, 13, 22],
[20, 21, 23],
[21, 24, 23],
[12, 20, 23],
[12, 23, 14],
[23, 24, 15],
[23, 15, 14],
[21, 22, 24],
[22, 25, 24],
[24, 25, 16],
[24, 16, 15],
[13, 25, 22],
[13, 16, 25],
],
dtype=torch.int64,
device=device,
),
)
)
# 3rd-check
verts, faces = meshes.get_mesh_verts_faces(2)
self.assertTrue(verts.size(0) == 0)
self.assertTrue(faces.size(0) == 0)
# 4th-check
verts, faces = meshes.get_mesh_verts_faces(3)
self.assertTrue(
torch.allclose(
verts,
torch.tensor(
[
[1.0, -1.0, -1.0],
[1.0, -1.0, 1.0],
[1.0, -1.0, 3.0],
[3.0, -1.0, -1.0],
[3.0, -1.0, 1.0],
[3.0, -1.0, 3.0],
[-1.0, 1.0, 1.0],
[-1.0, 1.0, 3.0],
[1.0, 1.0, -1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 3.0],
[3.0, 1.0, -1.0],
[3.0, 1.0, 1.0],
[3.0, 1.0, 3.0],
[-1.0, 3.0, 1.0],
[-1.0, 3.0, 3.0],
[1.0, 3.0, -1.0],
[1.0, 3.0, 1.0],
[1.0, 3.0, 3.0],
[3.0, 3.0, -1.0],
[3.0, 3.0, 1.0],
[3.0, 3.0, 3.0],
],
dtype=torch.float32,
device=device,
),
)
)
self.assertTrue(
torch.allclose(
faces,
torch.tensor(
[
[0, 1, 8],
[1, 9, 8],
[0, 8, 11],
[0, 11, 3],
[0, 4, 1],
[0, 3, 4],
[11, 12, 4],
[11, 4, 3],
[1, 2, 9],
[2, 10, 9],
[1, 5, 2],
[1, 4, 5],
[12, 13, 5],
[12, 5, 4],
[2, 13, 10],
[2, 5, 13],
[6, 7, 14],
[7, 15, 14],
[14, 15, 17],
[15, 18, 17],
[6, 14, 17],
[6, 17, 9],
[6, 10, 7],
[6, 9, 10],
[7, 18, 15],
[7, 10, 18],
[8, 9, 16],
[9, 17, 16],
[16, 17, 19],
[17, 20, 19],
[8, 16, 19],
[8, 19, 11],
[19, 20, 12],
[19, 12, 11],
[17, 18, 20],
[18, 21, 20],
[20, 21, 13],
[20, 13, 12],
[10, 21, 18],
[10, 13, 21],
],
dtype=torch.int64,
device=device,
),
)
)
@staticmethod
def cubify_with_init(batch_size: int, V: int):
device = torch.device("cuda:0")
voxels = torch.rand(
(batch_size, V, V, V), dtype=torch.float32, device=device
)
torch.cuda.synchronize()
def convert():
cubify(voxels, 0.5)
torch.cuda.synchronize()
return convert
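# Hedged usage note: cubify_with_init (like the *_with_init helpers above)
# returns a closure, presumably consumed by a benchmark harness. A hypothetical
# call:
#   convert = TestCubify.cubify_with_init(batch_size=32, V=16)
#   convert()  # runs cubify once and synchronizes CUDA afterwards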
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn as nn
from pytorch3d import _C
from pytorch3d.ops.graph_conv import (
GraphConv,
gather_scatter,
gather_scatter_python,
)
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils import ico_sphere
class TestGraphConv(unittest.TestCase):
def test_undirected(self):
dtype = torch.float32
device = torch.device("cuda:0")
verts = torch.tensor(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype, device=device
)
edges = torch.tensor([[0, 1], [0, 2]], device=device)
w0 = torch.tensor([[1, 1, 1]], dtype=dtype, device=device)
w1 = torch.tensor([[-1, -1, -1]], dtype=dtype, device=device)
expected_y = torch.tensor(
[
[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9],
[4 + 5 + 6 - 1 - 2 - 3],
[7 + 8 + 9 - 1 - 2 - 3],
],
dtype=dtype,
device=device,
)
conv = GraphConv(3, 1, directed=False).to(device)
conv.w0.weight.data.copy_(w0)
conv.w0.bias.data.zero_()
conv.w1.weight.data.copy_(w1)
conv.w1.bias.data.zero_()
y = conv(verts, edges)
self.assertTrue(torch.allclose(y, expected_y))
def test_no_edges(self):
dtype = torch.float32
verts = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
edges = torch.zeros(0, 2, dtype=torch.int64)
w0 = torch.tensor([[1, -1, -2]], dtype=dtype)
expected_y = torch.tensor(
[[1 - 2 - 2 * 3], [4 - 5 - 2 * 6], [7 - 8 - 2 * 9]], dtype=dtype
)
conv = GraphConv(3, 1).to(dtype)
conv.w0.weight.data.copy_(w0)
conv.w0.bias.data.zero_()
y = conv(verts, edges)
self.assertTrue(torch.allclose(y, expected_y))
def test_no_verts_and_edges(self):
dtype = torch.float32
verts = torch.tensor([], dtype=dtype, requires_grad=True)
edges = torch.tensor([], dtype=dtype)
w0 = torch.tensor([[1, -1, -2]], dtype=dtype)
conv = GraphConv(3, 1).to(dtype)
conv.w0.weight.data.copy_(w0)
conv.w0.bias.data.zero_()
y = conv(verts, edges)
self.assertTrue(torch.allclose(y, torch.tensor([])))
self.assertTrue(y.requires_grad)
def test_directed(self):
dtype = torch.float32
verts = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
edges = torch.tensor([[0, 1], [0, 2]])
w0 = torch.tensor([[1, 1, 1]], dtype=dtype)
w1 = torch.tensor([[-1, -1, -1]], dtype=dtype)
expected_y = torch.tensor(
[[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9], [4 + 5 + 6], [7 + 8 + 9]],
dtype=dtype,
)
conv = GraphConv(3, 1, directed=True).to(dtype)
conv.w0.weight.data.copy_(w0)
conv.w0.bias.data.zero_()
conv.w1.weight.data.copy_(w1)
conv.w1.bias.data.zero_()
y = conv(verts, edges)
self.assertTrue(torch.allclose(y, expected_y))
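# With directed=True each edge (i, j) contributes x_j only to vertex i, so
# vertex 0 sums the w1-transformed features of vertices 1 and 2 while those
# vertices keep only their w0 self term; in test_undirected above, the w1
# term flows in both directions along each edge.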
def test_backward(self):
device = torch.device("cuda:0")
mesh = ico_sphere()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
verts_cuda = verts.clone().to(device)
edges_cuda = edges.clone().to(device)
verts.requires_grad = True
verts_cuda.requires_grad = True
neighbor_sums_cuda = gather_scatter(verts_cuda, edges_cuda, False)
neighbor_sums = gather_scatter_python(verts, edges, False)
neighbor_sums_cuda.sum().backward()
neighbor_sums.sum().backward()
self.assertTrue(torch.allclose(verts.grad.cpu(), verts_cuda.grad.cpu()))
def test_repr(self):
conv = GraphConv(32, 64, directed=True)
self.assertEqual(repr(conv), "GraphConv(32 -> 64, directed=True)")
def test_cpu_cuda_tensor_error(self):
device = torch.device("cuda:0")
verts = torch.tensor(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=torch.float32,
device=device,
)
edges = torch.tensor([[0, 1], [0, 2]])
conv = GraphConv(3, 1, directed=True).to(torch.float32)
with self.assertRaises(Exception) as err:
conv(verts, edges)
self.assertTrue(
"tensors must be on the same device." in str(err.exception)
)
def test_gather_scatter(self):
"""
Check that the gather_scatter CUDA and Python versions give the same results.
Check that the gather_scatter CUDA version throws an error if CPU tensors
are given as input.
"""
device = torch.device("cuda:0")
mesh = ico_sphere()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
w0 = nn.Linear(3, 1)
input = w0(verts)
# output
output_cpu = gather_scatter_python(input, edges, False)
output_cuda = _C.gather_scatter(
input.to(device=device), edges.to(device=device), False, False
)
self.assertTrue(torch.allclose(output_cuda.cpu(), output_cpu))
with self.assertRaises(Exception) as err:
_C.gather_scatter(input.cpu(), edges.cpu(), False, False)
self.assertTrue("Not implemented on the CPU" in str(err.exception))
# directed
output_cpu = gather_scatter_python(input, edges, True)
output_cuda = _C.gather_scatter(
input.to(device=device), edges.to(device=device), True, False
)
self.assertTrue(torch.allclose(output_cuda.cpu(), output_cpu))
@staticmethod
def graph_conv_forward_backward(
gconv_dim,
num_meshes,
num_verts,
num_faces,
directed: bool,
backend: str = "cuda",
):
device = torch.device("cuda" if backend == "cuda" else "cpu")
verts_list = torch.tensor(
num_verts * [[0.11, 0.22, 0.33]], device=device
).view(-1, 3)
faces_list = torch.tensor(num_faces * [[1, 2, 3]], device=device).view(
-1, 3
)
meshes = Meshes(num_meshes * [verts_list], num_meshes * [faces_list])
gconv = GraphConv(gconv_dim, gconv_dim, directed=directed)
gconv.to(device)
edges = meshes.edges_packed()
total_verts = meshes.verts_packed().shape[0]
# Features.
x = torch.randn(
total_verts, gconv_dim, device=device, requires_grad=True
)
torch.cuda.synchronize()
def run_graph_conv():
y1 = gconv(x, edges)
y1.sum().backward()
torch.cuda.synchronize()
return run_graph_conv
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
from pytorch3d.renderer.lighting import DirectionalLights, PointLights
from pytorch3d.transforms import RotateAxisAngle
from common_testing import TestCaseMixin
class TestLights(TestCaseMixin, unittest.TestCase):
def test_init_lights(self):
"""
Initialize Lights class with the default values.
"""
device = torch.device("cuda:0")
light = DirectionalLights(device=device)
keys = ["ambient_color", "diffuse_color", "specular_color", "direction"]
for k in keys:
prop = getattr(light, k)
self.assertTrue(torch.is_tensor(prop))
self.assertTrue(prop.device == device)
self.assertTrue(prop.shape == (1, 3))
light = PointLights(device=device)
keys = ["ambient_color", "diffuse_color", "specular_color", "location"]
for k in keys:
prop = getattr(light, k)
self.assertTrue(torch.is_tensor(prop))
self.assertTrue(prop.device == device)
self.assertTrue(prop.shape == (1, 3))
def test_lights_clone_to(self):
device = torch.device("cuda:0")
cpu = torch.device("cpu")
light = DirectionalLights()
new_light = light.clone().to(device)
keys = ["ambient_color", "diffuse_color", "specular_color", "direction"]
for k in keys:
prop = getattr(light, k)
new_prop = getattr(new_light, k)
self.assertTrue(prop.device == cpu)
self.assertTrue(new_prop.device == device)
self.assertSeparate(new_prop, prop)
light = PointLights()
new_light = light.clone().to(device)
keys = ["ambient_color", "diffuse_color", "specular_color", "location"]
for k in keys:
prop = getattr(light, k)
new_prop = getattr(new_light, k)
self.assertTrue(prop.device == cpu)
self.assertTrue(new_prop.device == device)
self.assertSeparate(new_prop, prop)
def test_lights_accessor(self):
d_light = DirectionalLights(
ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
)
p_light = PointLights(ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
for light in [d_light, p_light]:
# Update element
color = (0.5, 0.5, 0.5)
light[1].ambient_color = color
self.assertTrue(
torch.allclose(light.ambient_color[1], torch.tensor(color))
)
# Get item and get value
l0 = light[0]
self.assertTrue(
torch.allclose(l0.ambient_color, torch.tensor((0.0, 0.0, 0.0)))
)
def test_initialize_lights_broadcast(self):
light = DirectionalLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(1, 3),
specular_color=torch.randn(1, 3),
)
keys = ["ambient_color", "diffuse_color", "specular_color", "direction"]
for k in keys:
prop = getattr(light, k)
self.assertTrue(prop.shape == (10, 3))
light = PointLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(1, 3),
specular_color=torch.randn(1, 3),
)
keys = ["ambient_color", "diffuse_color", "specular_color", "location"]
for k in keys:
prop = getattr(light, k)
self.assertTrue(prop.shape == (10, 3))
def test_initialize_lights_broadcast_fail(self):
"""
Batch dims have to be the same or 1.
"""
with self.assertRaises(ValueError):
DirectionalLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
)
with self.assertRaises(ValueError):
PointLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
)
def test_initialize_lights_dimensions_fail(self):
"""
Color should have shape (N, 3) or (1, 3)
"""
with self.assertRaises(ValueError):
DirectionalLights(ambient_color=torch.randn(10, 4))
with self.assertRaises(ValueError):
DirectionalLights(direction=torch.randn(10, 4))
with self.assertRaises(ValueError):
PointLights(ambient_color=torch.randn(10, 4))
with self.assertRaises(ValueError):
PointLights(location=torch.randn(10, 4))
class TestDiffuseLighting(unittest.TestCase):
def test_diffuse_directional_lights(self):
"""
Test with a single point where:
1) the normal and light direction are 45 degrees apart.
2) the normal and light direction are 90 degrees apart. The output
should be zero for this case
"""
color = torch.tensor([1, 1, 1], dtype=torch.float32)
direction = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
normals = normals[None, None, :]
expected_output = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
)
expected_output = expected_output.view(-1, 1, 3)
light = DirectionalLights(diffuse_color=color, direction=direction)
output_light = light.diffuse(normals=normals)
self.assertTrue(torch.allclose(output_light, expected_output))
# Change light direction to be 90 degrees apart from normal direction.
direction = torch.tensor([0, 1, 0], dtype=torch.float32)
light.direction = direction
expected_output = torch.zeros_like(expected_output)
output_light = light.diffuse(normals=normals)
self.assertTrue(torch.allclose(output_light, expected_output))
def test_diffuse_point_lights(self):
"""
Test with a single point at the origin. Test two cases:
1) the point light is at (0, 1/sqrt(2), 1/sqrt(2)), so the light direction
is 45 degrees apart from the normal direction.
2) the point light is at (0, 1, 0), so the light direction is 90
degrees apart from the normal direction. The output
should be zero for this case.
"""
color = torch.tensor([1, 1, 1], dtype=torch.float32)
location = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_output = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
)
expected_output = expected_output.view(-1, 1, 3)
light = PointLights(
diffuse_color=color[None, :], location=location[None, :]
)
output_light = light.diffuse(
points=points[None, None, :], normals=normals[None, None, :]
)
self.assertTrue(torch.allclose(output_light, expected_output))
# Change light direction to be 90 degrees apart from normal direction.
location = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_output = torch.zeros_like(expected_output)
light = PointLights(
diffuse_color=color[None, :], location=location[None, :]
)
output_light = light.diffuse(
points=points[None, None, :], normals=normals[None, None, :]
)
self.assertTrue(torch.allclose(output_light, expected_output))
def test_diffuse_batched(self):
"""
Test with a batch where each batch element has one point
where the normal and light direction are 45 degrees apart.
"""
batch_size = 10
color = torch.tensor([1, 1, 1], dtype=torch.float32)
direction = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_out = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
)
# Reshape
direction = direction.view(-1, 3).expand(batch_size, -1)
normals = normals.view(-1, 1, 3).expand(batch_size, -1, -1)
color = color.view(-1, 3).expand(batch_size, -1)
expected_out = expected_out.view(-1, 1, 3).expand(batch_size, 1, 3)
lights = DirectionalLights(diffuse_color=color, direction=direction)
output_light = lights.diffuse(normals=normals)
self.assertTrue(torch.allclose(output_light, expected_out))
def test_diffuse_batched_broadcast_inputs(self):
"""
Test with a batch where each batch element has one point
where the normal and light direction are 45 degrees apart.
The color and direction are the same for each batch element.
"""
batch_size = 10
color = torch.tensor([1, 1, 1], dtype=torch.float32)
direction = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_out = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
)
# Reshape
normals = normals.view(-1, 1, 3).expand(batch_size, -1, -1)
expected_out = expected_out.view(-1, 1, 3).expand(batch_size, 1, 3)
# Don't expand the direction or color. Broadcasting should happen
# in the diffuse function.
direction = direction.view(1, 3)
color = color.view(1, 3)
lights = DirectionalLights(diffuse_color=color, direction=direction)
output_light = lights.diffuse(normals=normals)
self.assertTrue(torch.allclose(output_light, expected_out))
def test_diffuse_batched_arbitrary_input_dims(self):
"""
        Test with a batch of inputs whose shape mimics the shape used in a
        shading function, i.e. an interpolated normal per pixel for each of
        the top K faces per pixel.
"""
N, H, W, K = 16, 256, 256, 100
device = torch.device("cuda:0")
color = torch.tensor([1, 1, 1], dtype=torch.float32, device=device)
direction = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
device=device,
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32, device=device)
normals = normals.view(1, 1, 1, 1, 3).expand(N, H, W, K, -1)
direction = direction.view(1, 3)
color = color.view(1, 3)
expected_output = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
device=device,
)
expected_output = expected_output.view(1, 1, 1, 1, 3)
expected_output = expected_output.expand(N, H, W, K, -1)
lights = DirectionalLights(diffuse_color=color, direction=direction)
output_light = lights.diffuse(normals=normals)
self.assertTrue(torch.allclose(output_light, expected_output))
def test_diffuse_batched_packed(self):
"""
Test with a batch of 2 meshes each of which has faces on a single plane.
The normal and light direction are 45 degrees apart for the first mesh
and 90 degrees apart for the second mesh.
The points and normals are in the packed format i.e. no batch dimension.
"""
verts_packed = torch.rand((10, 3)) # points aren't used
faces_per_mesh = [6, 4]
mesh_to_vert_idx = [0] * faces_per_mesh[0] + [1] * faces_per_mesh[1]
mesh_to_vert_idx = torch.tensor(mesh_to_vert_idx, dtype=torch.int64)
color = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=torch.float32)
direction = torch.tensor(
[
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
[0, 1, 0], # 90 degrees to normal so zero diffuse light
],
dtype=torch.float32,
)
normals = torch.tensor([[0, 0, 1], [0, 0, 1]], dtype=torch.float32)
expected_output = torch.zeros_like(verts_packed, dtype=torch.float32)
expected_output[:6, :] += 1 / np.sqrt(2)
expected_output[6:, :] = 0.0
lights = DirectionalLights(
diffuse_color=color[mesh_to_vert_idx, :],
direction=direction[mesh_to_vert_idx, :],
)
output_light = lights.diffuse(normals=normals[mesh_to_vert_idx, :])
self.assertTrue(torch.allclose(output_light, expected_output))
class TestSpecularLighting(unittest.TestCase):
def test_specular_directional_lights(self):
"""
Specular highlights depend on the camera position as well as the light
position/direction.
Test with a single point where:
1) the normal and light direction are -45 degrees apart and the normal
and camera position are +45 degrees apart. The reflected light ray
will be perfectly aligned with the camera so the output is 1.0.
2) the normal and light direction are -45 degrees apart and the
camera position is behind the point. The output should be zero for
this case.
"""
color = torch.tensor([1, 0, 1], dtype=torch.float32)
direction = torch.tensor(
[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_output = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
expected_output = expected_output.view(-1, 1, 3)
lights = DirectionalLights(specular_color=color, direction=direction)
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
camera_position=camera_position[None, :],
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_output))
# Change camera position to be behind the point.
camera_position = torch.tensor(
[+1 / np.sqrt(2), -1 / np.sqrt(2), 0], dtype=torch.float32
)
expected_output = torch.zeros_like(expected_output)
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
camera_position=camera_position[None, :],
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_output))
def test_specular_point_lights(self):
"""
Replace directional lights with point lights and check the output
is the same.
Test an additional case where the angle between the light reflection
direction and the view direction is 30 degrees.
"""
color = torch.tensor([1, 0, 1], dtype=torch.float32)
location = torch.tensor([-1, 1, 0], dtype=torch.float32)
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_output = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
expected_output = expected_output.view(-1, 1, 3)
lights = PointLights(
specular_color=color[None, :], location=location[None, :]
)
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
camera_position=camera_position[None, :],
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_output))
# Change camera position to be behind the point
camera_position = torch.tensor(
[+1 / np.sqrt(2), -1 / np.sqrt(2), 0], dtype=torch.float32
)
expected_output = torch.zeros_like(expected_output)
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
camera_position=camera_position[None, :],
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_output))
# Change camera direction to be 30 degrees from the reflection direction
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
rotate_30 = RotateAxisAngle(-30, axis="z")
camera_position = rotate_30.transform_points(camera_position[None, :])
expected_output = torch.tensor(
[np.cos(30.0 * np.pi / 180), 0.0, np.cos(30.0 * np.pi / 180)],
dtype=torch.float32,
)
expected_output = expected_output.view(-1, 1, 3)
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
camera_position=camera_position[None, :],
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_output ** 10))
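    @staticmethod
    def specular_reference(
        color, points, normals, direction, camera_position, shininess
    ):
        """
        Minimal sketch of the Phong specular term these tests assert against:
        color * max(0, reflect(L, N) . V) ** shininess, where
        reflect(L, N) = 2 (N . L) N - L and V is the unit view direction.
        Illustrative only; it assumes unit-length inputs pre-broadcast to a
        common shape and is not part of the pytorch3d API.
        """
        view = torch.nn.functional.normalize(
            camera_position - points, p=2, dim=-1
        )
        cos_angle = (normals * direction).sum(dim=-1, keepdim=True)
        reflect = 2.0 * cos_angle * normals - direction
        alpha = (view * reflect).sum(dim=-1, keepdim=True).clamp(min=0.0)
        return color * alpha ** shininess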
def test_specular_batched(self):
batch_size = 10
color = torch.tensor([1, 0, 1], dtype=torch.float32)
direction = torch.tensor(
[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_out = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
# Reshape
direction = direction.view(1, 3).expand(batch_size, -1)
camera_position = camera_position.view(1, 3).expand(batch_size, -1)
normals = normals.view(1, 1, 3).expand(batch_size, -1, -1)
points = points.view(1, 1, 3).expand(batch_size, -1, -1)
color = color.view(1, 3).expand(batch_size, -1)
expected_out = expected_out.view(1, 1, 3).expand(batch_size, 1, 3)
lights = DirectionalLights(specular_color=color, direction=direction)
output_light = lights.specular(
points=points,
normals=normals,
camera_position=camera_position,
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_out))
def test_specular_batched_broadcast_inputs(self):
batch_size = 10
color = torch.tensor([1, 0, 1], dtype=torch.float32)
direction = torch.tensor(
[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_out = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
# Reshape
normals = normals.view(1, 1, 3).expand(batch_size, -1, -1)
points = points.view(1, 1, 3).expand(batch_size, -1, -1)
expected_out = expected_out.view(1, 1, 3).expand(batch_size, 1, 3)
# Don't expand the direction, color or camera_position.
# These should be broadcasted in the specular function
direction = direction.view(1, 3)
camera_position = camera_position.view(1, 3)
color = color.view(1, 3)
lights = DirectionalLights(specular_color=color, direction=direction)
output_light = lights.specular(
points=points,
normals=normals,
camera_position=camera_position,
shininess=torch.tensor(10),
)
self.assertTrue(torch.allclose(output_light, expected_out))
def test_specular_batched_arbitrary_input_dims(self):
"""
        Test with a batch of inputs whose shape mimics the shape expected
        after rasterization, i.e. a normal per pixel for each of the top K
        faces per pixel.
"""
device = torch.device("cuda:0")
N, H, W, K = 16, 256, 256, 100
color = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)
direction = torch.tensor(
[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
camera_position = torch.tensor(
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0], dtype=torch.float32
)
points = torch.tensor([0, 0, 0], dtype=torch.float32, device=device)
normals = torch.tensor([0, 1, 0], dtype=torch.float32, device=device)
points = points.view(1, 1, 1, 1, 3).expand(N, H, W, K, 3)
normals = normals.view(1, 1, 1, 1, 3).expand(N, H, W, K, 3)
direction = direction.view(1, 3)
color = color.view(1, 3)
camera_position = camera_position.view(1, 3)
expected_output = torch.tensor(
[1.0, 0.0, 1.0], dtype=torch.float32, device=device
)
expected_output = expected_output.view(-1, 1, 1, 1, 3)
expected_output = expected_output.expand(N, H, W, K, -1)
lights = DirectionalLights(specular_color=color, direction=direction)
output_light = lights.specular(
points=points,
normals=normals,
camera_position=camera_position,
shininess=10.0,
)
self.assertTrue(torch.allclose(output_light, expected_output))
def test_specular_batched_packed(self):
"""
Test with a batch of 2 meshes each of which has faces on a single plane.
The points and normals are in the packed format i.e. no batch dimension.
"""
faces_per_mesh = [6, 4]
mesh_to_vert_idx = [0] * faces_per_mesh[0] + [1] * faces_per_mesh[1]
mesh_to_vert_idx = torch.tensor(mesh_to_vert_idx, dtype=torch.int64)
color = torch.tensor([[1, 1, 1], [1, 0, 1]], dtype=torch.float32)
direction = torch.tensor(
[[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], [-1, 1, 0]],
dtype=torch.float32,
)
camera_position = torch.tensor(
[
[+1 / np.sqrt(2), 1 / np.sqrt(2), 0],
[+1 / np.sqrt(2), -1 / np.sqrt(2), 0],
],
dtype=torch.float32,
)
points = torch.tensor([[0, 0, 0]], dtype=torch.float32)
normals = torch.tensor([[0, 1, 0], [0, 1, 0]], dtype=torch.float32)
expected_output = torch.zeros((10, 3), dtype=torch.float32)
expected_output[:6, :] += 1.0
lights = DirectionalLights(
specular_color=color[mesh_to_vert_idx, :],
direction=direction[mesh_to_vert_idx, :],
)
output_light = lights.specular(
points=points.view(-1, 3).expand(10, -1),
normals=normals.view(-1, 3)[mesh_to_vert_idx, :],
camera_position=camera_position[mesh_to_vert_idx, :],
shininess=10.0,
)
self.assertTrue(torch.allclose(output_light, expected_output))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.renderer.materials import Materials
from common_testing import TestCaseMixin
class TestMaterials(TestCaseMixin, unittest.TestCase):
def test_init(self):
"""
Initialize Materials class with the default values.
"""
device = torch.device("cuda:0")
mat = Materials(device=device)
self.assertTrue(torch.is_tensor(mat.ambient_color))
self.assertTrue(torch.is_tensor(mat.diffuse_color))
self.assertTrue(torch.is_tensor(mat.specular_color))
self.assertTrue(torch.is_tensor(mat.shininess))
self.assertTrue(mat.ambient_color.device == device)
self.assertTrue(mat.diffuse_color.device == device)
self.assertTrue(mat.specular_color.device == device)
self.assertTrue(mat.shininess.device == device)
self.assertTrue(mat.ambient_color.shape == (1, 3))
self.assertTrue(mat.diffuse_color.shape == (1, 3))
self.assertTrue(mat.specular_color.shape == (1, 3))
self.assertTrue(mat.shininess.shape == (1,))
def test_materials_clone_to(self):
device = torch.device("cuda:0")
cpu = torch.device("cpu")
mat = Materials()
new_mat = mat.clone().to(device)
self.assertTrue(mat.ambient_color.device == cpu)
self.assertTrue(mat.diffuse_color.device == cpu)
self.assertTrue(mat.specular_color.device == cpu)
self.assertTrue(mat.shininess.device == cpu)
self.assertTrue(new_mat.ambient_color.device == device)
self.assertTrue(new_mat.diffuse_color.device == device)
self.assertTrue(new_mat.specular_color.device == device)
self.assertTrue(new_mat.shininess.device == device)
self.assertSeparate(new_mat.ambient_color, mat.ambient_color)
self.assertSeparate(new_mat.diffuse_color, mat.diffuse_color)
self.assertSeparate(new_mat.specular_color, mat.specular_color)
self.assertSeparate(new_mat.shininess, mat.shininess)
def test_initialize_materials_broadcast(self):
materials = Materials(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(1, 3),
specular_color=torch.randn(1, 3),
shininess=torch.randn(1),
)
self.assertTrue(materials.ambient_color.shape == (10, 3))
self.assertTrue(materials.diffuse_color.shape == (10, 3))
self.assertTrue(materials.specular_color.shape == (10, 3))
self.assertTrue(materials.shininess.shape == (10,))
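    # Note: batch dimensions of size 1 are broadcast up to the largest batch
    # size among the inputs, which is why mixing (10, 3) and (1, 3) inputs
    # above yields attributes with batch size 10.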
def test_initialize_materials_broadcast_fail(self):
"""
Batch dims have to be the same or 1.
"""
with self.assertRaises(ValueError):
Materials(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
)
def test_initialize_materials_dimensions_fail(self):
"""
        Colors should have shape (N, 3) or (1, 3); shininess should have
        shape (1,), (1, 1), (N,) or (N, 1).
"""
with self.assertRaises(ValueError):
Materials(ambient_color=torch.randn(10, 4))
with self.assertRaises(ValueError):
Materials(shininess=torch.randn(10, 2))
def test_initialize_materials_mixed_inputs(self):
mat = Materials(
ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),)
)
self.assertTrue(mat.ambient_color.shape == (1, 3))
self.assertTrue(mat.diffuse_color.shape == (1, 3))
def test_initialize_materials_mixed_inputs_broadcast(self):
mat = Materials(
ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),)
)
self.assertTrue(mat.ambient_color.shape == (10, 3))
self.assertTrue(mat.diffuse_color.shape == (10, 3))
self.assertTrue(mat.specular_color.shape == (10, 3))
self.assertTrue(mat.shininess.shape == (10,))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.loss import mesh_edge_loss
from pytorch3d.structures import Meshes
from test_sample_points_from_meshes import TestSamplePoints
class TestMeshEdgeLoss(unittest.TestCase):
def test_empty_meshes(self):
device = torch.device("cuda:0")
target_length = 0
N = 10
V = 32
verts_list = []
faces_list = []
for _ in range(N):
vn = torch.randint(3, high=V, size=(1,))[0].item()
verts = torch.rand((vn, 3), dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
loss = mesh_edge_loss(mesh, target_length=target_length)
self.assertTrue(
torch.allclose(
loss, torch.tensor([0.0], dtype=torch.float32, device=device)
)
)
self.assertTrue(loss.requires_grad)
@staticmethod
def mesh_edge_loss_naive(meshes, target_length: float = 0.0):
"""
        Naive iterative implementation of the mesh edge loss calculation.
"""
edges_packed = meshes.edges_packed()
verts_packed = meshes.verts_packed()
edge_to_mesh = meshes.edges_packed_to_mesh_idx()
N = len(meshes)
device = meshes.device
valid = meshes.valid
predlosses = torch.zeros((N,), dtype=torch.float32, device=device)
for b in range(N):
if valid[b] == 0:
continue
mesh_edges = edges_packed[edge_to_mesh == b]
verts_edges = verts_packed[mesh_edges]
num_edges = mesh_edges.size(0)
for e in range(num_edges):
v0, v1 = verts_edges[e, 0], verts_edges[e, 1]
predlosses[b] += (
(v0 - v1).norm(dim=0, p=2) - target_length
) ** 2.0
if num_edges > 0:
predlosses[b] = predlosses[b] / num_edges
return predlosses.mean()
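    @staticmethod
    def mesh_edge_loss_vectorized(meshes, target_length: float = 0.0):
        """
        Vectorized sketch of the same quantity computed by
        mesh_edge_loss_naive above: per mesh, the mean squared deviation of
        edge lengths from target_length, averaged over the batch. This is an
        illustrative reference, not the pytorch3d op itself.
        """
        verts_packed = meshes.verts_packed()
        edges_packed = meshes.edges_packed()
        edge_to_mesh = meshes.edges_packed_to_mesh_idx()
        N = len(meshes)
        # Squared deviation of each edge length from the target.
        lengths = (
            verts_packed[edges_packed[:, 0]] - verts_packed[edges_packed[:, 1]]
        ).norm(dim=1, p=2)
        per_edge = (lengths - target_length) ** 2.0
        # Average per mesh (empty meshes contribute zero), then over the batch.
        num_edges = edge_to_mesh.bincount(minlength=N).clamp(min=1).float()
        per_mesh = torch.zeros(N, device=meshes.device).scatter_add_(
            0, edge_to_mesh, per_edge
        )
        return (per_mesh / num_edges).mean()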
def test_mesh_edge_loss_output(self):
"""
Check outputs of tensorized and iterative implementations are the same.
"""
device = torch.device("cuda:0")
target_length = 0.5
num_meshes = 10
num_verts = 32
num_faces = 64
verts_list = []
faces_list = []
valid = torch.randint(2, size=(num_meshes,))
for n in range(num_meshes):
if valid[n]:
vn = torch.randint(3, high=num_verts, size=(1,))[0].item()
fn = torch.randint(vn, high=num_faces, size=(1,))[0].item()
verts = torch.rand((vn, 3), dtype=torch.float32, device=device)
faces = torch.randint(
vn, size=(fn, 3), dtype=torch.int64, device=device
)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts=verts_list, faces=faces_list)
loss = mesh_edge_loss(meshes, target_length=target_length)
predloss = TestMeshEdgeLoss.mesh_edge_loss_naive(meshes, target_length)
self.assertTrue(torch.allclose(loss, predloss))
@staticmethod
def mesh_edge_loss(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300
):
meshes = TestSamplePoints.init_meshes(
num_meshes, max_v, max_f, device="cuda:0"
)
torch.cuda.synchronize()
def compute_loss():
mesh_edge_loss(meshes, target_length=0.0)
torch.cuda.synchronize()
return compute_loss
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from pytorch3d.loss.mesh_laplacian_smoothing import mesh_laplacian_smoothing
from pytorch3d.structures.meshes import Meshes
class TestLaplacianSmoothing(unittest.TestCase):
@staticmethod
def laplacian_smoothing_naive_uniform(meshes):
"""
        Naive implementation of Laplacian smoothing with uniform weights.
"""
verts_packed = meshes.verts_packed() # (sum(V_n), 3)
faces_packed = meshes.faces_packed() # (sum(F_n), 3)
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
        # Filling L with all vertex pairs of each face is equivalent to
        # filling it with the edge pairs.
for f in faces_packed:
L[f[0], f[1]] = 1
L[f[0], f[2]] = 1
L[f[1], f[2]] = 1
            # symmetric
L[f[1], f[0]] = 1
L[f[2], f[0]] = 1
L[f[2], f[1]] = 1
        norm_w = L.sum(dim=1, keepdim=True)
idx = norm_w > 0
norm_w[idx] = 1.0 / norm_w[idx]
loss = (L.mm(verts_packed) * norm_w - verts_packed).norm(dim=1)
weights = torch.zeros(V, dtype=torch.float32, device=meshes.device)
for v in range(V):
weights[v] = meshes.num_verts_per_mesh()[
meshes.verts_packed_to_mesh_idx()[v]
]
weights = 1.0 / weights
loss = loss * weights
return loss.sum() / len(meshes)
@staticmethod
def laplacian_smoothing_naive_cot(meshes, method: str = "cot"):
"""
        Naive implementation of Laplacian smoothing with cotangent weights.
"""
verts_packed = meshes.verts_packed() # (sum(V_n), 3)
faces_packed = meshes.faces_packed() # (sum(F_n), 3)
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
inv_areas = torch.zeros(
(V, 1), dtype=torch.float32, device=meshes.device
)
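        # For a triangle with edge lengths A, B, C, the cotangent of the angle
        # opposite edge A follows from the law of cosines and the area
        # formula: cot(a) = (B^2 + C^2 - A^2) / (4 * area). Each face
        # contributes its three cotangents to the Laplacian entries of the
        # edges opposite those angles.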
for f in faces_packed:
v0 = verts_packed[f[0], :]
v1 = verts_packed[f[1], :]
v2 = verts_packed[f[2], :]
A = (v1 - v2).norm()
B = (v0 - v2).norm()
C = (v0 - v1).norm()
s = 0.5 * (A + B + C)
face_area = (
(s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
)
inv_areas[f[0]] += face_area
inv_areas[f[1]] += face_area
inv_areas[f[2]] += face_area
A2, B2, C2 = A * A, B * B, C * C
cota = (B2 + C2 - A2) / face_area / 4.0
cotb = (A2 + C2 - B2) / face_area / 4.0
cotc = (A2 + B2 - C2) / face_area / 4.0
L[f[1], f[2]] += cota
L[f[2], f[0]] += cotb
L[f[0], f[1]] += cotc
            # symmetric
L[f[2], f[1]] += cota
L[f[0], f[2]] += cotb
L[f[1], f[0]] += cotc
idx = inv_areas > 0
inv_areas[idx] = 1.0 / inv_areas[idx]
        norm_w = L.sum(dim=1, keepdim=True)
idx = norm_w > 0
norm_w[idx] = 1.0 / norm_w[idx]
if method == "cotcurv":
loss = (L.mm(verts_packed) - verts_packed) * inv_areas * 0.25
loss = loss.norm(dim=1)
else:
loss = L.mm(verts_packed) * norm_w - verts_packed
loss = loss.norm(dim=1)
weights = torch.zeros(V, dtype=torch.float32, device=meshes.device)
for v in range(V):
weights[v] = meshes.num_verts_per_mesh()[
meshes.verts_packed_to_mesh_idx()[v]
]
weights = 1.0 / weights
loss = loss * weights
return loss.sum() / len(meshes)
@staticmethod
def init_meshes(
num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = (
torch.rand((num_verts, 3), dtype=torch.float32, device=device)
* 2.0
- 1.0
) # verts in the space of [-1, 1]
faces = torch.stack(
[
torch.randperm(num_verts, device=device)[:3]
for _ in range(num_faces)
],
dim=0,
)
# avoids duplicate vertices in a face
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
return meshes
def test_laplacian_smoothing_uniform(self):
"""
Test Laplacian Smoothing with uniform weights.
"""
meshes = TestLaplacianSmoothing.init_meshes(10, 100, 300)
out = mesh_laplacian_smoothing(meshes, method="uniform")
naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_uniform(
meshes
)
self.assertTrue(torch.allclose(out, naive_out))
def test_laplacian_smoothing_cot(self):
"""
        Test Laplacian Smoothing with cotangent weights.
"""
meshes = TestLaplacianSmoothing.init_meshes(10, 100, 300)
out = mesh_laplacian_smoothing(meshes, method="cot")
naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_cot(
meshes, method="cot"
)
self.assertTrue(torch.allclose(out, naive_out))
def test_laplacian_smoothing_cotcurv(self):
"""
        Test Laplacian Smoothing with cotangent curvature (cotcurv) weights.
"""
meshes = TestLaplacianSmoothing.init_meshes(10, 100, 300)
out = mesh_laplacian_smoothing(meshes, method="cotcurv")
naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_cot(
meshes, method="cotcurv"
)
self.assertTrue(torch.allclose(out, naive_out))
@staticmethod
def laplacian_smoothing_with_init(
num_meshes: int, num_verts: int, num_faces: int, device: str = "cpu"
):
device = torch.device(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3), dtype=torch.float32, device=device
)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
torch.cuda.synchronize()
def smooth():
mesh_laplacian_smoothing(meshes, method="cotcurv")
torch.cuda.synchronize()
return smooth
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from pytorch3d.loss.mesh_normal_consistency import mesh_normal_consistency
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere
class TestMeshNormalConsistency(unittest.TestCase):
@staticmethod
def init_faces(num_verts: int = 1000):
faces = []
for f0 in range(num_verts):
for f1 in range(f0 + 1, num_verts):
f2 = torch.arange(f1 + 1, num_verts)
n = f2.shape[0]
if n == 0:
continue
faces.append(
torch.stack(
[
torch.full((n,), f0, dtype=torch.int64),
torch.full((n,), f1, dtype=torch.int64),
f2,
],
dim=1,
)
)
faces = torch.cat(faces, 0)
return faces
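    # init_faces enumerates every triple f0 < f1 < f2, i.e. all
    # C(num_verts, 3) candidate faces with three distinct vertices.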
@staticmethod
def init_meshes(
num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
):
device = torch.device("cuda:0")
valid_faces = TestMeshNormalConsistency.init_faces(num_verts).to(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = (
torch.rand((num_verts, 3), dtype=torch.float32, device=device)
* 2.0
- 1.0
) # verts in the space of [-1, 1]
"""
faces = torch.stack(
[
torch.randperm(num_verts, device=device)[:3]
for _ in range(num_faces)
],
dim=0,
)
# avoids duplicate vertices in a face
"""
idx = torch.randperm(valid_faces.shape[0], device=device)[
: min(valid_faces.shape[0], num_faces)
]
faces = valid_faces[idx]
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
return meshes
@staticmethod
def mesh_normal_consistency_naive(meshes):
"""
Naive iterative implementation of mesh normal consistency.
"""
N = len(meshes)
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
edges_packed = meshes.edges_packed()
face_to_edge = meshes.faces_packed_to_edges_packed()
edges_packed_to_mesh_idx = meshes.edges_packed_to_mesh_idx()
E = edges_packed.shape[0]
loss = []
mesh_idx = []
for e in range(E):
            # Indices of the faces incident to edge e.
            face_idx = face_to_edge.eq(e).any(1).nonzero()
v0 = verts_packed[edges_packed[e, 0]]
v1 = verts_packed[edges_packed[e, 1]]
normals = []
for f in face_idx:
v2 = -1
for j in range(3):
if (
faces_packed[f, j] != edges_packed[e, 0]
and faces_packed[f, j] != edges_packed[e, 1]
):
v2 = faces_packed[f, j]
assert v2 > -1
v2 = verts_packed[v2]
normals.append((v1 - v0).view(-1).cross((v2 - v0).view(-1)))
for i in range(len(normals) - 1):
for j in range(1, len(normals)):
if i != j:
mesh_idx.append(edges_packed_to_mesh_idx[e])
loss.append(
(
1
- torch.cosine_similarity(
normals[i].view(1, 3),
-normals[j].view(1, 3),
)
)
)
mesh_idx = torch.tensor(mesh_idx, device=meshes.device)
num = mesh_idx.bincount(minlength=N)
weights = 1.0 / num[mesh_idx].float()
loss = torch.cat(loss) * weights
return loss.sum() / N
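    # For each pair of faces sharing an edge, the term accumulated above is
    # 1 - cosine_similarity(n_i, -n_j): it is 0 when n_i = -n_j and reaches
    # its maximum of 2 when n_i = n_j.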
def test_mesh_normal_consistency_simple(self):
r"""
Mesh 1:
v3
/\
/ \
e4 / f1 \ e3
/ \
v2 /___e2___\ v1
\ /
\ /
e1 \ f0 / e0
\ /
\/
v0
"""
device = torch.device("cuda:0")
# mesh1 shown above
verts1 = torch.rand((4, 3), dtype=torch.float32, device=device)
faces1 = torch.tensor(
[[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device
)
# mesh2 is a cuboid with 8 verts, 12 faces and 18 edges
verts2 = torch.tensor(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
],
dtype=torch.float32,
device=device,
)
faces2 = torch.tensor(
[
[0, 1, 2],
[1, 3, 2], # left face: 0, 1
[2, 3, 6],
[3, 7, 6], # bottom face: 2, 3
[0, 2, 6],
[0, 6, 4], # front face: 4, 5
[0, 5, 1],
[0, 4, 5], # up face: 6, 7
[6, 7, 5],
[6, 5, 4], # right face: 8, 9
[1, 7, 3],
[1, 5, 7], # back face: 10, 11
],
dtype=torch.int64,
device=device,
)
# mesh3 is like mesh1 but with another face added to e2
verts3 = torch.rand((5, 3), dtype=torch.float32, device=device)
faces3 = torch.tensor(
[[0, 1, 2], [2, 1, 3], [2, 1, 4]], dtype=torch.int64, device=device
)
meshes = Meshes(
verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3]
)
# mesh1: normal consistency computation
n0 = (verts1[1] - verts1[2]).cross(verts1[3] - verts1[2])
n1 = (verts1[1] - verts1[2]).cross(verts1[0] - verts1[2])
loss1 = 1.0 - torch.cosine_similarity(n0.view(1, 3), -n1.view(1, 3))
# mesh2: normal consistency computation
# In the cube mesh, 6 edges are shared with coplanar faces (loss=0),
# 12 edges are shared by perpendicular faces (loss=1)
loss2 = 12.0 / 18
# mesh3
n0 = (verts3[1] - verts3[2]).cross(verts3[3] - verts3[2])
n1 = (verts3[1] - verts3[2]).cross(verts3[0] - verts3[2])
n2 = (verts3[1] - verts3[2]).cross(verts3[4] - verts3[2])
loss3 = (
3.0
- torch.cosine_similarity(n0.view(1, 3), -n1.view(1, 3))
- torch.cosine_similarity(n0.view(1, 3), -n2.view(1, 3))
- torch.cosine_similarity(n1.view(1, 3), -n2.view(1, 3))
)
loss3 /= 3.0
loss = (loss1 + loss2 + loss3) / 3.0
out = mesh_normal_consistency(meshes)
self.assertTrue(torch.allclose(out, loss))
def test_mesh_normal_consistency(self):
"""
Test Mesh Normal Consistency for random meshes.
"""
meshes = TestMeshNormalConsistency.init_meshes(5, 100, 300)
out1 = mesh_normal_consistency(meshes)
out2 = TestMeshNormalConsistency.mesh_normal_consistency_naive(meshes)
self.assertTrue(torch.allclose(out1, out2))
@staticmethod
def mesh_normal_consistency_with_ico(
num_meshes: int, level: int = 3, device: str = "cpu"
):
device = torch.device(device)
mesh = ico_sphere(level, device)
verts, faces = mesh.get_mesh_verts_faces(0)
verts_list = [verts.clone() for _ in range(num_meshes)]
faces_list = [faces.clone() for _ in range(num_meshes)]
meshes = Meshes(verts_list, faces_list)
torch.cuda.synchronize()
def loss():
mesh_normal_consistency(meshes)
torch.cuda.synchronize()
return loss
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
from pytorch3d.structures.meshes import Meshes
from common_testing import TestCaseMixin
class TestMeshes(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
np.random.seed(42)
torch.manual_seed(42)
@staticmethod
def init_mesh(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
lists_to_tensors: bool = False,
device: str = "cpu",
):
"""
Function to generate a Meshes object of N meshes with
random numbers of vertices and faces.
Args:
num_meshes: Number of meshes to generate.
max_v: Max number of vertices per mesh.
max_f: Max number of faces per mesh.
lists_to_tensors: Determines whether the generated meshes should be
constructed from lists (=False) or
a tensor (=True) of faces/verts.
Returns:
Meshes object.
"""
device = torch.device(device)
verts_list = []
faces_list = []
# Randomly generate numbers of faces and vertices in each mesh.
if lists_to_tensors:
# If we define faces/verts with tensors, f/v has to be the
# same for each mesh in the batch.
f = torch.randint(max_f, size=(1,), dtype=torch.int32)
v = torch.randint(3, high=max_v, size=(1,), dtype=torch.int32)
f = f.repeat(num_meshes)
v = v.repeat(num_meshes)
else:
# For lists of faces and vertices, we can sample different v/f
# per mesh.
f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)
v = torch.randint(
3, high=max_v, size=(num_meshes,), dtype=torch.int32
)
# Generate the actual vertices and faces.
for i in range(num_meshes):
verts = torch.rand((v[i], 3), dtype=torch.float32, device=device)
faces = torch.randint(
v[i], size=(f[i], 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
if lists_to_tensors:
verts_list = torch.stack(verts_list)
faces_list = torch.stack(faces_list)
return Meshes(verts=verts_list, faces=faces_list)
@staticmethod
def init_simple_mesh(device: str = "cpu"):
"""
Returns a Meshes data structure of simple mesh examples.
Returns:
Meshes object.
"""
device = torch.device(device)
verts = [
torch.tensor(
[[0.1, 0.3, 0.5], [0.5, 0.2, 0.1], [0.6, 0.8, 0.7]],
dtype=torch.float32,
device=device,
),
torch.tensor(
[
[0.1, 0.3, 0.3],
[0.6, 0.7, 0.8],
[0.2, 0.3, 0.4],
[0.1, 0.5, 0.3],
],
dtype=torch.float32,
device=device,
),
torch.tensor(
[
[0.7, 0.3, 0.6],
[0.2, 0.4, 0.8],
[0.9, 0.5, 0.2],
[0.2, 0.3, 0.4],
[0.9, 0.3, 0.8],
],
dtype=torch.float32,
device=device,
),
]
faces = [
torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),
torch.tensor(
[[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device
),
torch.tensor(
[
[1, 2, 0],
[0, 1, 3],
[2, 3, 1],
[4, 3, 2],
[4, 0, 1],
[4, 3, 1],
[4, 2, 1],
],
dtype=torch.int64,
device=device,
),
]
return Meshes(verts=verts, faces=faces)
def test_simple(self):
mesh = TestMeshes.init_simple_mesh("cuda:0")
self.assertClose(
mesh.verts_packed_to_mesh_idx().cpu(),
torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_verts_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
)
self.assertClose(
mesh.num_verts_per_mesh().cpu(), torch.tensor([3, 4, 5])
)
self.assertClose(
mesh.verts_padded_to_packed_idx().cpu(),
torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
)
self.assertClose(
mesh.faces_packed_to_mesh_idx().cpu(),
torch.tensor([0, 1, 1, 2, 2, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])
)
self.assertClose(
mesh.num_faces_per_mesh().cpu(), torch.tensor([1, 2, 7])
)
self.assertClose(
mesh.num_edges_per_mesh().cpu(),
torch.tensor([3, 5, 10], dtype=torch.int32),
)
def test_simple_random_meshes(self):
# Define the test mesh object either as a list or tensor of faces/verts.
for lists_to_tensors in (False, True):
N = 10
mesh = TestMeshes.init_mesh(
N, 100, 300, lists_to_tensors=lists_to_tensors
)
verts_list = mesh.verts_list()
faces_list = mesh.faces_list()
# Check batch calculations.
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertEqual(verts_per_mesh[n], v)
self.assertEqual(faces_per_mesh[n], f)
# Check compute packed.
verts_packed = mesh.verts_packed()
vert_to_mesh = mesh.verts_packed_to_mesh_idx()
mesh_to_vert = mesh.mesh_to_verts_packed_first_idx()
faces_packed = mesh.faces_packed()
face_to_mesh = mesh.faces_packed_to_mesh_idx()
mesh_to_face = mesh.mesh_to_faces_packed_first_idx()
curv, curf = 0, 0
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(
verts_packed[curv : curv + v, :], verts_list[n]
)
self.assertClose(
faces_packed[curf : curf + f, :] - curv, faces_list[n]
)
self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())
self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())
self.assertTrue(mesh_to_vert[n] == curv)
self.assertTrue(mesh_to_face[n] == curf)
curv += v
curf += f
# Check compute edges and compare with numpy unique.
edges = mesh.edges_packed().cpu().numpy()
edge_to_mesh_idx = mesh.edges_packed_to_mesh_idx().cpu().numpy()
num_edges_per_mesh = mesh.num_edges_per_mesh().cpu().numpy()
npfaces_packed = mesh.faces_packed().cpu().numpy()
e01 = npfaces_packed[:, [0, 1]]
e12 = npfaces_packed[:, [1, 2]]
e20 = npfaces_packed[:, [2, 0]]
npedges = np.concatenate((e12, e20, e01), axis=0)
npedges = np.sort(npedges, axis=1)
unique_edges, unique_idx = np.unique(
npedges, return_index=True, axis=0
)
self.assertTrue(np.allclose(edges, unique_edges))
temp = face_to_mesh.cpu().numpy()
temp = np.concatenate((temp, temp, temp), axis=0)
edge_to_mesh = temp[unique_idx]
self.assertTrue(np.allclose(edge_to_mesh_idx, edge_to_mesh))
num_edges = np.bincount(edge_to_mesh, minlength=N)
self.assertTrue(np.allclose(num_edges_per_mesh, num_edges))
def test_allempty(self):
verts_list = []
faces_list = []
mesh = Meshes(verts=verts_list, faces=faces_list)
self.assertEqual(len(mesh), 0)
self.assertEqual(mesh.verts_padded().shape[0], 0)
self.assertEqual(mesh.faces_padded().shape[0], 0)
self.assertEqual(mesh.verts_packed().shape[0], 0)
self.assertEqual(mesh.faces_packed().shape[0], 0)
def test_empty(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts_list = []
faces_list = []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
for n in range(N):
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(
F, size=(1,), dtype=torch.int32, device=device
)[0]
verts = torch.rand((v, 3), dtype=torch.float32, device=device)
faces = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = len(verts_list[n])
f = len(faces_list[n])
if v > 0:
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
if f > 0:
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertTrue(verts_per_mesh[n] == v)
self.assertTrue(faces_per_mesh[n] == f)
def test_padding(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts, faces = [], []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
num_verts, num_faces = (
torch.zeros(N, dtype=torch.int32),
torch.zeros(N, dtype=torch.int32),
)
for n in range(N):
verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))
this_faces = torch.full(
(F, 3), -1, dtype=torch.int64, device=device
)
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(
F, size=(1,), dtype=torch.int32, device=device
)[0]
this_faces[:f, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
num_verts[n] = v
num_faces[n] = f
faces.append(this_faces)
mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))
self.assertListEqual(
mesh.num_faces_per_mesh().tolist(), num_faces.tolist()
)
for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):
self.assertClose(ff, faces[n][: num_faces[n]])
self.assertClose(vv, verts[n])
new_faces = [ff.clone() for ff in faces]
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[
0
]
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
this_faces[10 : f + 10, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
new_faces[3] = this_faces
with self.assertRaisesRegex(ValueError, "Padding of faces"):
Meshes(verts=torch.stack(verts), faces=torch.stack(new_faces))
def test_clone(self):
N = 5
mesh = TestMeshes.init_mesh(N, 10, 100)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
new_mesh = mesh.clone()
# Modify tensors in both meshes.
new_mesh._verts_list[0] = new_mesh._verts_list[0] * 5
mesh._num_verts_per_mesh = torch.randint_like(
mesh.num_verts_per_mesh(), high=10
)
# Check cloned and original Meshes objects do not share tensors.
self.assertFalse(
torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
)
self.assertFalse(
torch.allclose(
mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh()
)
)
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
self.assertSeparate(new_mesh.faces_packed(), mesh.faces_packed())
self.assertSeparate(new_mesh.faces_padded(), mesh.faces_padded())
self.assertSeparate(new_mesh.edges_packed(), mesh.edges_packed())
def test_laplacian_packed(self):
def naive_laplacian_packed(meshes):
verts_packed = meshes.verts_packed()
edges_packed = meshes.edges_packed()
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
for e in edges_packed:
L[e[0], e[1]] = 1
                # symmetric
L[e[1], e[0]] = 1
deg = L.sum(1).view(-1, 1)
deg[deg > 0] = 1.0 / deg[deg > 0]
L = L * deg
diag = torch.eye(V, dtype=torch.float32, device=meshes.device)
L.masked_fill_(diag > 0, -1)
return L
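        # The matrix built by the helper above is the random-walk style graph
        # Laplacian L = D^-1 A - I, where A is the vertex adjacency matrix
        # and D the diagonal degree matrix.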
        # Note that we don't test with random meshes for this case, as the
        # Laplacian is only defined for simple graphs (i.e. valid meshes).
meshes = TestMeshes.init_simple_mesh("cuda:0")
lapl_naive = naive_laplacian_packed(meshes)
lapl = meshes.laplacian_packed().to_dense()
# check with naive
self.assertClose(lapl, lapl_naive)
def test_offset_verts(self):
def naive_offset_verts(mesh, vert_offsets_packed):
            # Build a new Meshes object with the offset verts.
new_verts_packed = mesh.verts_packed() + vert_offsets_packed
new_verts_list = list(
new_verts_packed.split(mesh.num_verts_per_mesh().tolist(), 0)
)
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
mesh = TestMeshes.init_mesh(N, 10, 100)
all_v = mesh.verts_packed().size(0)
verts_per_mesh = mesh.num_verts_per_mesh()
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
deform = torch.rand(
(all_v, 3), dtype=torch.float32, device=mesh.device
)
# new meshes class to hold the deformed mesh
new_mesh_naive = naive_offset_verts(mesh, deform)
new_mesh = mesh.offset_verts(deform)
# check verts_list & faces_list
verts_cumsum = torch.cumsum(verts_per_mesh, 0).tolist()
verts_cumsum.insert(0, 0)
for i in range(N):
self.assertClose(
new_mesh.verts_list()[i],
mesh.verts_list()[i]
+ deform[verts_cumsum[i] : verts_cumsum[i + 1]],
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(
mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check faces and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
)
# check padded & packed
self.assertClose(
new_mesh.faces_padded(), new_mesh_naive.faces_padded()
)
self.assertClose(
new_mesh.verts_padded(), new_mesh_naive.verts_padded()
)
self.assertClose(
new_mesh.faces_packed(), new_mesh_naive.faces_packed()
)
self.assertClose(
new_mesh.verts_packed(), new_mesh_naive.verts_packed()
)
self.assertClose(
new_mesh.edges_packed(), new_mesh_naive.edges_packed()
)
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(),
new_mesh_naive.num_verts_per_mesh(),
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(),
new_mesh_naive.num_faces_per_mesh(),
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
)
self.assertClose(
new_mesh.faces_areas_packed(),
new_mesh_naive.faces_areas_packed(),
)
def test_scale_verts(self):
def naive_scale_verts(mesh, scale):
if not torch.is_tensor(scale):
scale = torch.ones(len(mesh)).mul_(scale)
            # Build a new Meshes object with the scaled verts.
new_verts_list = [
scale[i] * v.clone() for (i, v) in enumerate(mesh.verts_list())
]
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
for test in ["tensor", "scalar"]:
mesh = TestMeshes.init_mesh(N, 10, 100)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
if test == "tensor":
scales = torch.rand(N)
elif test == "scalar":
scales = torch.rand(1)[0].item()
new_mesh_naive = naive_scale_verts(mesh, scales)
new_mesh = mesh.scale_verts(scales)
for i in range(N):
if test == "tensor":
self.assertClose(
scales[i] * mesh.verts_list()[i],
new_mesh.verts_list()[i],
)
else:
self.assertClose(
scales * mesh.verts_list()[i],
new_mesh.verts_list()[i],
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(
mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check face and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
)
# check padded & packed
self.assertClose(
new_mesh.faces_padded(), new_mesh_naive.faces_padded()
)
self.assertClose(
new_mesh.verts_padded(), new_mesh_naive.verts_padded()
)
self.assertClose(
new_mesh.faces_packed(), new_mesh_naive.faces_packed()
)
self.assertClose(
new_mesh.verts_packed(), new_mesh_naive.verts_packed()
)
self.assertClose(
new_mesh.edges_packed(), new_mesh_naive.edges_packed()
)
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(),
new_mesh_naive.num_verts_per_mesh(),
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(),
new_mesh_naive.num_faces_per_mesh(),
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
)
self.assertClose(
new_mesh.faces_areas_packed(),
new_mesh_naive.faces_areas_packed(),
)
def test_extend_list(self):
N = 10
mesh = TestMeshes.init_mesh(5, 10, 100)
for force in [0, 1]:
if force:
# force some computes to happen
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
new_mesh = mesh.extend(N)
self.assertEqual(len(mesh) * 10, len(new_mesh))
for i in range(len(mesh)):
for n in range(N):
self.assertClose(
mesh.verts_list()[i], new_mesh.verts_list()[i * N + n]
)
self.assertClose(
mesh.faces_list()[i], new_mesh.faces_list()[i * N + n]
)
self.assertTrue(mesh.valid[i] == new_mesh.valid[i * N + n])
self.assertAllSeparate(
mesh.verts_list()
+ new_mesh.verts_list()
+ mesh.faces_list()
+ new_mesh.faces_list()
)
self.assertTrue(new_mesh._verts_packed is None)
self.assertTrue(new_mesh._faces_packed is None)
self.assertTrue(new_mesh._verts_padded is None)
self.assertTrue(new_mesh._faces_padded is None)
self.assertTrue(new_mesh._edges_packed is None)
with self.assertRaises(ValueError):
mesh.extend(N=-1)
def test_to(self):
mesh = TestMeshes.init_mesh(5, 10, 100, device=torch.device("cuda:0"))
device = torch.device("cuda:1")
new_mesh = mesh.to(device)
self.assertTrue(new_mesh.device == device)
self.assertTrue(mesh.device == torch.device("cuda:0"))
def test_split_mesh(self):
mesh = TestMeshes.init_mesh(5, 10, 100)
split_sizes = [2, 3]
split_meshes = mesh.split(split_sizes)
self.assertTrue(len(split_meshes[0]) == 2)
self.assertTrue(
split_meshes[0].verts_list()
== [
mesh.get_mesh_verts_faces(0)[0],
mesh.get_mesh_verts_faces(1)[0],
]
)
self.assertTrue(len(split_meshes[1]) == 3)
self.assertTrue(
split_meshes[1].verts_list()
== [
mesh.get_mesh_verts_faces(2)[0],
mesh.get_mesh_verts_faces(3)[0],
mesh.get_mesh_verts_faces(4)[0],
]
)
split_sizes = [2, 0.3]
with self.assertRaises(ValueError):
mesh.split(split_sizes)
def test_get_mesh_verts_faces(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
for i, (V, F) in enumerate(verts_faces):
verts, faces = mesh.get_mesh_verts_faces(i)
self.assertTrue(len(verts) == V)
self.assertClose(verts, verts_list[i])
self.assertTrue(len(faces) == F)
self.assertClose(faces, faces_list[i])
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(5)
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(0.2)
def test_get_bounding_boxes(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
for (V, F) in [(10, 100)]:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
mins = torch.min(verts, dim=0)[0]
maxs = torch.max(verts, dim=0)[0]
bboxes_gt = torch.stack([mins, maxs], dim=1).unsqueeze(0)
mesh = Meshes(verts=verts_list, faces=faces_list)
bboxes = mesh.get_bounding_boxes()
self.assertClose(bboxes_gt, bboxes)
def test_padded_to_packed_idx(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded_to_packed_idx = mesh.verts_padded_to_packed_idx()
verts_packed = mesh.verts_packed()
verts_padded = mesh.verts_padded()
verts_padded_flat = verts_padded.view(-1, 3)
self.assertClose(
verts_padded_flat[verts_padded_to_packed_idx], verts_packed
)
idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)
self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)
def test_getitem(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
def check_equal(selected, indices):
for selectedIdx, index in enumerate(indices):
self.assertClose(
selected.verts_list()[selectedIdx], mesh.verts_list()[index]
)
self.assertClose(
selected.faces_list()[selectedIdx], mesh.faces_list()[index]
)
# int index
index = 1
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == 1)
check_equal(mesh_selected, [index])
# list index
index = [1, 2]
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == len(index))
check_equal(mesh_selected, index)
# slice index
index = slice(0, 2, 1)
mesh_selected = mesh[index]
check_equal(mesh_selected, [0, 1])
# bool tensor
index = torch.tensor([1, 0, 1], dtype=torch.bool, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.sum())
check_equal(mesh_selected, [0, 2])
# int tensor
index = torch.tensor([1, 2], dtype=torch.int64, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.numel())
check_equal(mesh_selected, index.tolist())
# invalid index
index = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
index = 1.2
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
def test_compute_faces_areas(self):
verts = torch.tensor(
[
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.25, 0.8, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [0, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
face_areas = mesh.faces_areas_packed()
expected_areas = torch.tensor([0.125, 0.2])
self.assertTrue(torch.allclose(face_areas, expected_areas))
def test_compute_normals(self):
# Simple case with one mesh where normals point in either +/- ijk
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
[0.5, 0.0, 0.2],
[0.6, 0.0, 0.5],
[0.8, 0.0, 0.7],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor(
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=torch.int64
)
mesh = Meshes(verts=[verts], faces=[faces])
verts_normals_expected = torch.tensor(
[
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
faces_normals_expected = verts_normals_expected[[0, 3, 6, 9], :]
self.assertTrue(
torch.allclose(mesh.verts_normals_list()[0], verts_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.faces_normals_list()[0], faces_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.verts_normals_packed(), verts_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.faces_normals_packed(), faces_normals_expected)
)
# Multiple meshes in the batch with equal sized meshes
meshes_extended = mesh.extend(3)
for m in meshes_extended.verts_normals_list():
self.assertTrue(torch.allclose(m, verts_normals_expected))
for f in meshes_extended.faces_normals_list():
self.assertTrue(torch.allclose(f, faces_normals_expected))
# Multiple meshes in the batch with different sized meshes
# Check padded and packed normals are the correct sizes.
verts2 = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces2 = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)
verts_list = [verts, verts2]
faces_list = [faces, faces2]
meshes = Meshes(verts=verts_list, faces=faces_list)
verts_normals_padded = meshes.verts_normals_padded()
faces_normals_padded = meshes.faces_normals_padded()
for n in range(len(meshes)):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
if verts_normals_padded.shape[1] > v:
self.assertTrue(verts_normals_padded[n, v:, :].eq(0).all())
self.assertTrue(
torch.allclose(
verts_normals_padded[n, :v, :].view(-1, 3),
verts_normals_expected[:v, :],
)
)
if faces_normals_padded.shape[1] > f:
self.assertTrue(faces_normals_padded[n, f:, :].eq(0).all())
self.assertTrue(
torch.allclose(
faces_normals_padded[n, :f, :].view(-1, 3),
faces_normals_expected[:f, :],
)
)
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
self.assertTrue(
list(verts_normals_packed.shape)
== [verts.shape[0] + verts2.shape[0], 3]
)
self.assertTrue(
list(faces_normals_packed.shape)
== [faces.shape[0] + faces2.shape[0], 3]
)
# Single mesh where two faces share one vertex so the normal is
# the weighted sum of the two face normals.
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.0, 0.3, 0.2], # vertex is shared between two faces
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
verts_normals_expected = torch.tensor(
[
[-0.2408, -0.9631, -0.1204],
[-0.2408, -0.9631, -0.1204],
[-0.9389, -0.3414, -0.0427],
[-1.0000, 0.0000, 0.0000],
[-1.0000, 0.0000, 0.0000],
]
)
faces_normals_expected = torch.tensor(
[[-0.2408, -0.9631, -0.1204], [-1.0000, 0.0000, 0.0000]]
)
self.assertTrue(
torch.allclose(
mesh.verts_normals_list()[0], verts_normals_expected, atol=4e-5
)
)
self.assertTrue(
torch.allclose(
mesh.faces_normals_list()[0], faces_normals_expected, atol=4e-5
)
)
# Check empty mesh has empty normals
meshes = Meshes(verts=[], faces=[])
self.assertEqual(meshes.verts_normals_packed().shape[0], 0)
self.assertEqual(meshes.verts_normals_padded().shape[0], 0)
self.assertEqual(meshes.verts_normals_list(), [])
self.assertEqual(meshes.faces_normals_packed().shape[0], 0)
self.assertEqual(meshes.faces_normals_padded().shape[0], 0)
self.assertEqual(meshes.faces_normals_list(), [])
def test_compute_faces_areas_cpu_cuda(self):
num_meshes = 10
max_v = 100
max_f = 300
mesh_cpu = TestMeshes.init_mesh(num_meshes, max_v, max_f, device="cpu")
device = torch.device("cuda:0")
mesh_cuda = mesh_cpu.to(device)
face_areas_cpu = mesh_cpu.faces_areas_packed()
face_normals_cpu = mesh_cpu.faces_normals_packed()
face_areas_cuda = mesh_cuda.faces_areas_packed()
face_normals_cuda = mesh_cuda.faces_normals_packed()
self.assertClose(face_areas_cpu, face_areas_cuda.cpu(), atol=1e-6)
        # Because normals are normalized, faces with arbitrarily small areas
        # can produce numerically unstable normals. Thus only compare normals
        # for faces with areas > eps=1e-6.
nonzero = face_areas_cpu > 1e-6
self.assertClose(
face_normals_cpu[nonzero],
face_normals_cuda.cpu()[nonzero],
atol=1e-6,
)
@staticmethod
def compute_packed_with_init(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
device: str = "cpu",
):
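        # Benchmark helper: build the mesh once outside the timed region and
        # return a closure that recomputes the packed representation;
        # synchronize so queued CUDA work is included in the timing.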
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_packed():
mesh._compute_packed(refresh=True)
torch.cuda.synchronize()
return compute_packed
@staticmethod
def compute_padded_with_init(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
device: str = "cpu",
):
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_padded():
mesh._compute_padded(refresh=True)
torch.cuda.synchronize()
return compute_padded
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d import _C
class TestNearestNeighborPoints(unittest.TestCase):
@staticmethod
def nn_points_idx_naive(x, y):
"""
PyTorch implementation of nn_points_idx function.
"""
N, P1, D = x.shape
_N, P2, _D = y.shape
assert N == _N and D == _D
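        # Broadcast to all pairs: (N, P1, 1, D) - (N, 1, P2, D) yields
        # (N, P1, P2, D); argmin over dim 2 gives, for each point in x,
        # the index of its nearest neighbor in y.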
diffs = x.view(N, P1, 1, D) - y.view(N, 1, P2, D)
dists2 = (diffs * diffs).sum(3)
idx = dists2.argmin(2)
return idx
def test_nn_cuda(self):
"""
Test cuda output vs naive python implementation.
"""
device = torch.device("cuda:0")
for D in [3, 4]:
for N in [1, 4]:
for P1 in [1, 8, 64, 128]:
for P2 in [32, 128]:
x = torch.randn(N, P1, D, device=device)
y = torch.randn(N, P2, D, device=device)
# _C.nn_points_idx should dispatch
# to the cpp or cuda versions of the function
# depending on the input type.
idx1 = _C.nn_points_idx(x, y)
idx2 = TestNearestNeighborPoints.nn_points_idx_naive(
x, y
)
self.assertTrue(idx1.size(1) == P1)
self.assertTrue(torch.all(idx1 == idx2))
def test_nn_cuda_error(self):
"""
Check that nn_points_idx throws an error if cpu tensors
are given as input.
"""
x = torch.randn(1, 1, 3)
y = torch.randn(1, 1, 3)
with self.assertRaises(Exception) as err:
_C.nn_points_idx(x, y)
self.assertTrue("Not implemented on the CPU" in str(err.exception))
@staticmethod
def bm_nn_points_cuda_with_init(
N: int = 4, D: int = 4, P1: int = 128, P2: int = 128
):
device = torch.device("cuda:0")
x = torch.randn(N, P1, D, device=device)
y = torch.randn(N, P2, D, device=device)
torch.cuda.synchronize()
def nn_cpp():
_C.nn_points_idx(x.contiguous(), y.contiguous())
torch.cuda.synchronize()
return nn_cpp
@staticmethod
def bm_nn_points_python_with_init(
N: int = 4, D: int = 4, P1: int = 128, P2: int = 128
):
x = torch.randn(N, P1, D)
y = torch.randn(N, P2, D)
def nn_python():
TestNearestNeighborPoints.nn_points_idx_naive(x, y)
return nn_python
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import unittest
from io import StringIO
from pathlib import Path
import torch
from pytorch3d.io import load_obj, save_obj
class TestMeshObjIO(unittest.TestCase):
def test_load_obj_simple(self):
obj_file = "\n".join(
[
"# this is a comment", # Comments should be ignored.
"v 0.1 0.2 0.3",
"v 0.2 0.3 0.4",
"v 0.3 0.4 0.5",
"v 0.4 0.5 0.6", # some obj files have multiple spaces after v
"f 1 2 3",
"f 1 2 4 3 1", # Polygons should be split into triangles
]
)
obj_file = StringIO(obj_file)
verts, faces, aux = load_obj(obj_file)
normals = aux.normals
textures = aux.verts_uvs
materials = aux.material_colors
tex_maps = aux.texture_images
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
expected_faces = torch.tensor(
[
[0, 1, 2], # First face
[0, 1, 3], # Second face (polygon)
[0, 3, 2], # Second face (polygon)
[0, 2, 0], # Second face (polygon)
],
dtype=torch.int64,
)
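        # The polygon "f 1 2 4 3 1" is fan-triangulated around its first
        # vertex, producing the three "Second face" rows above.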
self.assertTrue(torch.all(verts == expected_verts))
self.assertTrue(torch.all(faces.verts_idx == expected_faces))
self.assertTrue(faces.normals_idx == [])
self.assertTrue(faces.textures_idx == [])
self.assertTrue(
torch.all(faces.materials_idx == -torch.ones(len(expected_faces)))
)
self.assertTrue(normals is None)
self.assertTrue(textures is None)
self.assertTrue(materials is None)
self.assertTrue(tex_maps is None)
def test_load_obj_complex(self):
obj_file = "\n".join(
[
"# this is a comment", # Comments should be ignored.
"v 0.1 0.2 0.3",
"v 0.2 0.3 0.4",
"v 0.3 0.4 0.5",
"v 0.4 0.5 0.6",
"vn 0.000000 0.000000 -1.000000",
"vn -1.000000 -0.000000 -0.000000",
"vn -0.000000 -0.000000 1.000000", # Normals should not be ignored.
"v 0.5 0.6 0.7",
"vt 0.749279 0.501284 0.0", # Some files add 0.0 - ignore this.
"vt 0.999110 0.501077",
"vt 0.999455 0.750380",
"f 1 2 3",
"f 1 2 4 3 5", # Polygons should be split into triangles
"f 2/1/2 3/1/2 4/2/2", # Texture/normals are loaded correctly.
"f -1 -2 1", # Negative indexing counts from the end.
]
)
obj_file = StringIO(obj_file)
verts, faces, aux = load_obj(obj_file)
normals = aux.normals
textures = aux.verts_uvs
materials = aux.material_colors
tex_maps = aux.texture_images
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
[0.5, 0.6, 0.7],
],
dtype=torch.float32,
)
expected_faces = torch.tensor(
[
[0, 1, 2], # First face
[0, 1, 3], # Second face (polygon)
[0, 3, 2], # Second face (polygon)
[0, 2, 4], # Second face (polygon)
[1, 2, 3], # Third face (normals / texture)
[4, 3, 0], # Fourth face (negative indices)
],
dtype=torch.int64,
)
expected_normals = torch.tensor(
[
[0.000000, 0.000000, -1.000000],
[-1.000000, -0.000000, -0.000000],
[-0.000000, -0.000000, 1.000000],
],
dtype=torch.float32,
)
expected_textures = torch.tensor(
[[0.749279, 0.501284], [0.999110, 0.501077], [0.999455, 0.750380]],
dtype=torch.float32,
)
expected_faces_normals_idx = torch.tensor(
[[1, 1, 1]], dtype=torch.int64
)
expected_faces_textures_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
self.assertTrue(torch.all(verts == expected_verts))
self.assertTrue(torch.all(faces.verts_idx == expected_faces))
self.assertTrue(torch.allclose(normals, expected_normals))
self.assertTrue(torch.allclose(textures, expected_textures))
self.assertTrue(
torch.allclose(faces.normals_idx, expected_faces_normals_idx)
)
self.assertTrue(
torch.allclose(faces.textures_idx, expected_faces_textures_idx)
)
self.assertTrue(materials is None)
self.assertTrue(tex_maps is None)
def test_load_obj_normals_only(self):
obj_file = "\n".join(
[
"v 0.1 0.2 0.3",
"v 0.2 0.3 0.4",
"v 0.3 0.4 0.5",
"v 0.4 0.5 0.6",
"vn 0.000000 0.000000 -1.000000",
"vn -1.000000 -0.000000 -0.000000",
"f 2//1 3//1 4//2",
]
)
obj_file = StringIO(obj_file)
expected_faces_normals_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
expected_normals = torch.tensor(
[
[0.000000, 0.000000, -1.000000],
[-1.000000, -0.000000, -0.000000],
],
dtype=torch.float32,
)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
verts, faces, aux = load_obj(obj_file)
normals = aux.normals
textures = aux.verts_uvs
materials = aux.material_colors
tex_maps = aux.texture_images
self.assertTrue(
torch.allclose(faces.normals_idx, expected_faces_normals_idx)
)
self.assertTrue(torch.allclose(normals, expected_normals))
self.assertTrue(torch.allclose(verts, expected_verts))
self.assertTrue(faces.textures_idx == [])
self.assertTrue(textures is None)
self.assertTrue(materials is None)
self.assertTrue(tex_maps is None)
def test_load_obj_textures_only(self):
obj_file = "\n".join(
[
"v 0.1 0.2 0.3",
"v 0.2 0.3 0.4",
"v 0.3 0.4 0.5",
"v 0.4 0.5 0.6",
"vt 0.999110 0.501077",
"vt 0.999455 0.750380",
"f 2/1 3/1 4/2",
]
)
obj_file = StringIO(obj_file)
expected_faces_textures_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
expected_textures = torch.tensor(
[[0.999110, 0.501077], [0.999455, 0.750380]], dtype=torch.float32
)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
verts, faces, aux = load_obj(obj_file)
normals = aux.normals
textures = aux.verts_uvs
materials = aux.material_colors
tex_maps = aux.texture_images
self.assertTrue(
torch.allclose(faces.textures_idx, expected_faces_textures_idx)
)
self.assertTrue(torch.allclose(expected_textures, textures))
self.assertTrue(torch.allclose(expected_verts, verts))
self.assertTrue(faces.normals_idx == [])
self.assertTrue(normals is None)
self.assertTrue(materials is None)
self.assertTrue(tex_maps is None)
def test_load_obj_error_textures(self):
obj_file = "\n".join(["vt 0.1"])
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("does not have 2 values" in str(err.exception))
def test_load_obj_error_normals(self):
obj_file = "\n".join(["vn 0.1"])
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("does not have 3 values" in str(err.exception))
def test_load_obj_error_vertices(self):
obj_file = "\n".join(["v 1"])
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("does not have 3 values" in str(err.exception))
def test_load_obj_error_inconsistent_triplets(self):
obj_file = "\n".join(["f 2//1 3/1 4/1/2"])
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue(
"Vertex properties are inconsistent" in str(err.exception)
)
def test_load_obj_error_too_many_vertex_properties(self):
obj_file = "\n".join(["f 2/1/1/3"])
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
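        # "ony" (sic) matches the misspelled message raised by load_obj.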
self.assertTrue(
"Face vertices can ony have 3 properties" in str(err.exception)
)
def test_load_obj_error_invalid_vertex_indices(self):
obj_file = "\n".join(
["v 0.1 0.2 0.3", "v 0.1 0.2 0.3", "v 0.1 0.2 0.3", "f -2 5 1"]
)
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("Faces have invalid indices." in str(err.exception))
def test_load_obj_error_invalid_normal_indices(self):
obj_file = "\n".join(
[
"v 0.1 0.2 0.3",
"v 0.1 0.2 0.3",
"v 0.1 0.2 0.3",
"vn 0.1 0.2 0.3",
"vn 0.1 0.2 0.3",
"vn 0.1 0.2 0.3",
"f -2/2 2/4 1/1",
]
)
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("Faces have invalid indices." in str(err.exception))
def test_load_obj_error_invalid_texture_indices(self):
obj_file = "\n".join(
[
"v 0.1 0.2 0.3",
"v 0.1 0.2 0.3",
"v 0.1 0.2 0.3",
"vt 0.1 0.2",
"vt 0.1 0.2",
"vt 0.1 0.2",
"f -2//2 2//6 1//1",
]
)
obj_file = StringIO(obj_file)
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue("Faces have invalid indices." in str(err.exception))
def test_save_obj(self):
verts = torch.tensor(
[
[0.01, 0.2, 0.301],
[0.2, 0.03, 0.408],
[0.3, 0.4, 0.05],
[0.6, 0.7, 0.8],
],
dtype=torch.float32,
)
faces = torch.tensor(
[[0, 2, 1], [0, 1, 2], [3, 2, 1], [3, 1, 0]], dtype=torch.int64
)
obj_file = StringIO()
save_obj(obj_file, verts, faces, decimal_places=2)
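        # decimal_places=2 rounds each coordinate (e.g. 0.408 -> "0.41"),
        # and face indices are written 1-indexed.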
expected_file = "\n".join(
[
"v 0.01 0.20 0.30",
"v 0.20 0.03 0.41",
"v 0.30 0.40 0.05",
"v 0.60 0.70 0.80",
"f 1 3 2",
"f 1 2 3",
"f 4 3 2",
"f 4 2 1",
]
)
actual_file = obj_file.getvalue()
self.assertEqual(actual_file, expected_file)
def test_load_mtl(self):
DATA_DIR = (
Path(__file__).resolve().parent.parent / "docs/tutorials/data"
)
obj_filename = "cow_mesh/cow.obj"
filename = os.path.join(DATA_DIR, obj_filename)
verts, faces, aux = load_obj(filename)
materials = aux.material_colors
tex_maps = aux.texture_images
dtype = torch.float32
expected_materials = {
"material_1": {
"ambient_color": torch.tensor([1.0, 1.0, 1.0], dtype=dtype),
"diffuse_color": torch.tensor([1.0, 1.0, 1.0], dtype=dtype),
"specular_color": torch.tensor([0.0, 0.0, 0.0], dtype=dtype),
"shininess": torch.tensor([10.0], dtype=dtype),
}
}
# Check that there is an image with material name material_1.
self.assertTrue(tuple(tex_maps.keys()) == ("material_1",))
self.assertTrue(torch.is_tensor(tuple(tex_maps.values())[0]))
self.assertTrue(
torch.all(faces.materials_idx == torch.zeros(len(faces.verts_idx)))
)
# Check all keys and values in dictionary are the same.
for n1, n2 in zip(materials.keys(), expected_materials.keys()):
self.assertTrue(n1 == n2)
for k1, k2 in zip(
materials[n1].keys(), expected_materials[n2].keys()
):
self.assertTrue(
torch.allclose(
materials[n1][k1], expected_materials[n2][k2]
)
)
def test_load_mtl_fail(self):
# Faces have a material
obj_file = "\n".join(
[
"v 0.1 0.2 0.3",
"v 0.2 0.3 0.4",
"v 0.3 0.4 0.5",
"v 0.4 0.5 0.6",
"usemtl material_1",
"f 1 2 3",
"f 1 2 4",
]
)
obj_file = StringIO(obj_file)
with self.assertWarnsRegex(Warning, "No mtl file provided"):
verts, faces, aux = load_obj(obj_file)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
self.assertTrue(torch.allclose(verts, expected_verts))
self.assertTrue(torch.allclose(faces.verts_idx, expected_faces))
self.assertTrue(aux.material_colors is None)
self.assertTrue(aux.texture_images is None)
self.assertTrue(aux.normals is None)
self.assertTrue(aux.verts_uvs is None)
def test_load_obj_missing_texture(self):
DATA_DIR = Path(__file__).resolve().parent / "data"
obj_filename = "missing_files_obj/model.obj"
filename = os.path.join(DATA_DIR, obj_filename)
with self.assertWarnsRegex(Warning, "Texture file does not exist"):
verts, faces, aux = load_obj(filename)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
self.assertTrue(torch.allclose(verts, expected_verts))
self.assertTrue(torch.allclose(faces.verts_idx, expected_faces))
def test_load_obj_missing_mtl(self):
DATA_DIR = Path(__file__).resolve().parent / "data"
obj_filename = "missing_files_obj/model2.obj"
filename = os.path.join(DATA_DIR, obj_filename)
with self.assertWarnsRegex(Warning, "Mtl file does not exist"):
verts, faces, aux = load_obj(filename)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
self.assertTrue(torch.allclose(verts, expected_verts))
self.assertTrue(torch.allclose(faces.verts_idx, expected_faces))
@staticmethod
def save_obj_with_init(V: int, F: int):
verts_list = torch.tensor(V * [[0.11, 0.22, 0.33]]).view(-1, 3)
faces_list = torch.tensor(F * [[1, 2, 3]]).view(-1, 3)
obj_file = StringIO()
def save_mesh():
save_obj(obj_file, verts_list, faces_list, decimal_places=2)
return save_mesh
@staticmethod
def load_obj_with_init(V: int, F: int):
obj = "\n".join(["v 0.1 0.2 0.3"] * V + ["f 1 2 3"] * F)
def load_mesh():
obj_file = StringIO(obj)
verts, faces, aux = load_obj(obj_file)
return load_mesh
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import struct
import unittest
from io import BytesIO, StringIO
import torch
from pytorch3d.io.ply_io import _load_ply_raw, load_ply, save_ply
from common_testing import TestCaseMixin
class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
def test_raw_load_simple_ascii(self):
ply_file = "\n".join(
[
"ply",
"format ascii 1.0",
"comment made by Greg Turk",
"comment this file is a cube",
"element vertex 8",
"property float x",
"property float y",
"property float z",
"element face 6",
"property list uchar int vertex_index",
"element irregular_list 3",
"property list uchar int vertex_index",
"end_header",
"0 0 0",
"0 0 1",
"0 1 1",
"0 1 0",
"1 0 0",
"1 0 1",
"1 1 1",
"1 1 0",
"4 0 1 2 3",
"4 7 6 5 4",
"4 0 4 5 1",
"4 1 5 6 2",
"4 2 6 7 3",
"4 3 7 4 0", # end of faces
"4 0 1 2 3",
"4 7 6 5 4",
"3 4 5 1",
]
)
for line_ending in [None, "\n", "\r\n"]:
if line_ending is None:
stream = StringIO(ply_file)
else:
byte_file = ply_file.encode("ascii")
if line_ending == "\r\n":
byte_file = byte_file.replace(b"\n", b"\r\n")
stream = BytesIO(byte_file)
header, data = _load_ply_raw(stream)
self.assertTrue(header.ascii)
self.assertEqual(len(data), 3)
self.assertTupleEqual(data["face"].shape, (6, 4))
self.assertClose([0, 1, 2, 3], data["face"][0])
self.assertClose([3, 7, 4, 0], data["face"][5])
self.assertTupleEqual(data["vertex"].shape, (8, 3))
irregular = data["irregular_list"]
self.assertEqual(len(irregular), 3)
self.assertEqual(type(irregular), list)
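            # Each row contains a single property (the variable-length list),
            # hence the one-element unpacking below.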
[x] = irregular[0]
self.assertClose(x, [0, 1, 2, 3])
[x] = irregular[1]
self.assertClose(x, [7, 6, 5, 4])
[x] = irregular[2]
self.assertClose(x, [4, 5, 1])
def test_load_simple_ascii(self):
ply_file = "\n".join(
[
"ply",
"format ascii 1.0",
"comment made by Greg Turk",
"comment this file is a cube",
"element vertex 8",
"property float x",
"property float y",
"property float z",
"element face 6",
"property list uchar int vertex_index",
"end_header",
"0 0 0",
"0 0 1",
"0 1 1",
"0 1 0",
"1 0 0",
"1 0 1",
"1 1 1",
"1 1 0",
"4 0 1 2 3",
"4 7 6 5 4",
"4 0 4 5 1",
"4 1 5 6 2",
"4 2 6 7 3",
"4 3 7 4 0",
]
)
for line_ending in [None, "\n", "\r\n"]:
if line_ending is None:
stream = StringIO(ply_file)
else:
byte_file = ply_file.encode("ascii")
if line_ending == "\r\n":
byte_file = byte_file.replace(b"\n", b"\r\n")
stream = BytesIO(byte_file)
verts, faces = load_ply(stream)
self.assertEqual(verts.shape, (8, 3))
self.assertEqual(faces.shape, (12, 3))
verts_expected = [
[0, 0, 0],
[0, 0, 1],
[0, 1, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 1],
[1, 1, 0],
]
self.assertClose(verts, torch.FloatTensor(verts_expected))
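            # Each quad "4 a b c d" is triangulated as (a, b, c) and
            # (a, c, d); the first triangles of all six quads come first,
            # followed by the second triangles.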
faces_expected = [
[0, 1, 2],
[7, 6, 5],
[0, 4, 5],
[1, 5, 6],
[2, 6, 7],
[3, 7, 4],
[0, 2, 3],
[7, 5, 4],
[0, 5, 1],
[1, 6, 2],
[2, 7, 3],
[3, 4, 0],
]
self.assertClose(faces, torch.LongTensor(faces_expected))
def test_simple_save(self):
verts = torch.tensor(
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=torch.float32
)
faces = torch.tensor([[0, 1, 2], [0, 3, 4]])
file = StringIO()
save_ply(file, verts=verts, faces=faces)
file.seek(0)
verts2, faces2 = load_ply(file)
self.assertClose(verts, verts2)
self.assertClose(faces, faces2)
def test_load_simple_binary(self):
for big_endian in [True, False]:
verts = (
"0 0 0 "
"0 0 1 "
"0 1 1 "
"0 1 0 "
"1 0 0 "
"1 0 1 "
"1 1 1 "
"1 1 0"
).split()
faces = (
"4 0 1 2 3 "
"4 7 6 5 4 "
"4 0 4 5 1 "
"4 1 5 6 2 "
"4 2 6 7 3 "
"4 3 7 4 0 " # end of first 6
"4 0 1 2 3 "
"4 7 6 5 4 "
"3 4 5 1"
).split()
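            # The "mixed" element has rows of (list short uint foo, short bar):
            # row 0 is an empty list with bar = 0x0303; row 1 has one uint
            # entry and bar = b"\x00\x02" (2 big-endian, 512 little-endian).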
short_one = b"\00\01" if big_endian else b"\01\00"
mixed_data = b"\00\00" b"\03\03" + (
short_one + b"\00\01\01\01" b"\00\02"
)
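            # char + uchar + short + ushort + int + uint = 14 bytes of 0xff,
            # which read back as -1 for signed and the max for unsigned types.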
minus_one_data = b"\xff" * 14
endian_char = ">" if big_endian else "<"
format = (
"format binary_big_endian 1.0"
if big_endian
else "format binary_little_endian 1.0"
)
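            # vertex: 8 points x 3 float32 coords -> "24f" in file byte order.
            # vertex1: (float x, double y, float z) per point -> "fdf" * 8.
            # face plus irregular_list: 6 * 5 + (5 + 5 + 4) = 44 int8 values.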
vertex_pattern = endian_char + "24f"
vertex_data = struct.pack(vertex_pattern, *map(float, verts))
vertex1_pattern = endian_char + "fdffdffdffdffdffdffdffdf"
vertex1_data = struct.pack(vertex1_pattern, *map(float, verts))
face_char_pattern = endian_char + "44b"
face_char_data = struct.pack(face_char_pattern, *map(int, faces))
header = "\n".join(
[
"ply",
format,
"element vertex 8",
"property float x",
"property float y",
"property float z",
"element vertex1 8",
"property float x",
"property double y",
"property float z",
"element face 6",
"property list uchar uchar vertex_index",
"element irregular_list 3",
"property list uchar uchar vertex_index",
"element mixed 2",
"property list short uint foo",
"property short bar",
"element minus_ones 1",
"property char 1",
"property uchar 2",
"property short 3",
"property ushort 4",
"property int 5",
"property uint 6",
"end_header\n",
]
)
ply_file = b"".join(
[
header.encode("ascii"),
vertex_data,
vertex1_data,
face_char_data,
mixed_data,
minus_one_data,
]
)
metadata, data = _load_ply_raw(BytesIO(ply_file))
self.assertFalse(metadata.ascii)
self.assertEqual(len(data), 6)
self.assertTupleEqual(data["face"].shape, (6, 4))
self.assertClose([0, 1, 2, 3], data["face"][0])
self.assertClose([3, 7, 4, 0], data["face"][5])
self.assertTupleEqual(data["vertex"].shape, (8, 3))
self.assertEqual(len(data["vertex1"]), 8)
self.assertClose(data["vertex"], data["vertex1"])
self.assertClose(data["vertex"].flatten(), list(map(float, verts)))
irregular = data["irregular_list"]
self.assertEqual(len(irregular), 3)
self.assertEqual(type(irregular), list)
[x] = irregular[0]
self.assertClose(x, [0, 1, 2, 3])
[x] = irregular[1]
self.assertClose(x, [7, 6, 5, 4])
[x] = irregular[2]
self.assertClose(x, [4, 5, 1])
mixed = data["mixed"]
self.assertEqual(len(mixed), 2)
self.assertEqual(len(mixed[0]), 2)
self.assertEqual(len(mixed[1]), 2)
self.assertEqual(mixed[0][1], 3 * 256 + 3)
self.assertEqual(len(mixed[0][0]), 0)
self.assertEqual(mixed[1][1], (2 if big_endian else 2 * 256))
base = 1 + 256 + 256 * 256
self.assertEqual(len(mixed[1][0]), 1)
self.assertEqual(mixed[1][0][0], base if big_endian else 256 * base)
self.assertListEqual(
data["minus_ones"], [(-1, 255, -1, 65535, -1, 4294967295)]
)
def test_bad_ply_syntax(self):
"""Some syntactically bad ply files."""
lines = [
"ply",
"format ascii 1.0",
"comment dashfadskfj;k",
"element vertex 1",
"property float x",
"element listy 1",
"property list uint int x",
"end_header",
"0",
"0",
]
lines2 = lines.copy()
# this is ok
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[0] = "PLY"
with self.assertRaisesRegex(ValueError, "Invalid file header."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[2] = "#this is a comment"
with self.assertRaisesRegex(ValueError, "Invalid line.*"):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[3] = lines[4]
lines2[4] = lines[3]
with self.assertRaisesRegex(
ValueError, "Encountered property before any element."
):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[8] = "1 2"
with self.assertRaisesRegex(
ValueError, "Inconsistent data for vertex."
):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines[:-1]
with self.assertRaisesRegex(ValueError, "Not enough data for listy."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[5] = "element listy 2"
with self.assertRaisesRegex(ValueError, "Not enough data for listy."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2.insert(4, "property short x")
with self.assertRaisesRegex(
ValueError, "Cannot have two properties called x in vertex."
):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2.insert(4, "property zz short")
with self.assertRaisesRegex(ValueError, "Invalid datatype: zz"):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2.append("3")
with self.assertRaisesRegex(ValueError, "Extra data at end of file."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2.append("comment foo")
with self.assertRaisesRegex(ValueError, "Extra data at end of file."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2.insert(4, "element bad 1")
with self.assertRaisesRegex(
ValueError, "Found an element with no properties."
):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[-1] = "3 2 3 3"
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[-1] = "3 1 2 3 4"
msg = "A line of listy data did not have the specified length."
with self.assertRaisesRegex(ValueError, msg):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
lines2[3] = "element vertex one"
msg = "Number of items for vertex was not a number."
with self.assertRaisesRegex(ValueError, msg):
_load_ply_raw(StringIO("\n".join(lines2)))
        # Heterogeneous cases
lines2 = lines.copy()
lines2.insert(4, "property double y")
with self.assertRaisesRegex(
ValueError, "Too little data for an element."
):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2[-2] = "3.3 4.2"
_load_ply_raw(StringIO("\n".join(lines2)))
lines2[-2] = "3.3 4.3 2"
with self.assertRaisesRegex(
ValueError, "Too much data for an element."
):
_load_ply_raw(StringIO("\n".join(lines2)))
# Now make the ply file actually be readable as a Mesh
with self.assertRaisesRegex(
ValueError, "The ply file has no face element."
):
load_ply(StringIO("\n".join(lines)))
lines2 = lines.copy()
lines2[5] = "element face 1"
with self.assertRaisesRegex(ValueError, "Invalid vertices in file."):
load_ply(StringIO("\n".join(lines2)))
lines2.insert(5, "property float z")
lines2.insert(5, "property float y")
lines2[-2] = "0 0 0"
with self.assertRaisesRegex(
ValueError, "Faces must have at least 3 vertices."
):
load_ply(StringIO("\n".join(lines2)))
# Good one
lines2[-1] = "3 0 0 0"
load_ply(StringIO("\n".join(lines2)))
@staticmethod
def save_ply_bm(V: int, F: int):
verts_list = torch.tensor(V * [[0.11, 0.22, 0.33]]).view(-1, 3)
faces_list = torch.tensor(F * [[0, 1, 2]]).view(-1, 3)
def save_mesh():
file = StringIO()
save_ply(file, verts_list, faces_list, 2)
return save_mesh
@staticmethod
def load_ply_bm(V: int, F: int):
verts = torch.tensor([[0.1, 0.2, 0.3]]).expand(V, 3)
faces = torch.tensor([[0, 1, 2]], dtype=torch.int64).expand(F, 3)
ply_file = StringIO()
save_ply(ply_file, verts=verts, faces=faces)
ply = ply_file.getvalue()
# Recreate stream so it's unaffected by how it was created.
def load_mesh():
ply_file = StringIO(ply)
verts, faces = load_ply(ply_file)
return load_mesh
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import functools
import unittest
import torch
from pytorch3d import _C
from pytorch3d.renderer.mesh.rasterize_meshes import (
rasterize_meshes,
rasterize_meshes_python,
)
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
class TestRasterizeMeshes(unittest.TestCase):
def test_simple_python(self):
device = torch.device("cpu")
self._simple_triangle_raster(
rasterize_meshes_python, device, bin_size=-1
        )  # don't set bin_size
self._simple_blurry_raster(rasterize_meshes_python, device, bin_size=-1)
self._test_behind_camera(rasterize_meshes_python, device, bin_size=-1)
self._test_perspective_correct(
rasterize_meshes_python, device, bin_size=-1
)
def test_simple_cpu_naive(self):
device = torch.device("cpu")
self._simple_triangle_raster(rasterize_meshes, device)
self._simple_blurry_raster(rasterize_meshes, device)
self._test_behind_camera(rasterize_meshes, device)
self._test_perspective_correct(rasterize_meshes, device)
def test_simple_cuda_naive(self):
device = torch.device("cuda:0")
self._simple_triangle_raster(rasterize_meshes, device, bin_size=0)
self._simple_blurry_raster(rasterize_meshes, device, bin_size=0)
self._test_behind_camera(rasterize_meshes, device, bin_size=0)
self._test_perspective_correct(rasterize_meshes, device, bin_size=0)
def test_simple_cuda_binned(self):
device = torch.device("cuda:0")
self._simple_triangle_raster(rasterize_meshes, device, bin_size=5)
self._simple_blurry_raster(rasterize_meshes, device, bin_size=5)
self._test_behind_camera(rasterize_meshes, device, bin_size=5)
self._test_perspective_correct(rasterize_meshes, device, bin_size=5)
def test_python_vs_cpu_vs_cuda(self):
torch.manual_seed(231)
device = torch.device("cpu")
image_size = 32
blur_radius = 0.1 ** 2
faces_per_pixel = 3
for d in ["cpu", "cuda"]:
device = torch.device(d)
compare_grads = True
# Mesh with a single face.
verts1 = torch.tensor(
[[0.0, 0.6, 0.1], [-0.7, -0.4, 0.5], [0.7, -0.4, 0.7]],
dtype=torch.float32,
requires_grad=True,
device=device,
)
faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
meshes1 = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes1, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.detach().clone()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
# Mesh with multiple faces.
# fmt: off
verts1 = torch.tensor(
[
[ -0.5, 0.0, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.5], # noqa: E241, E201
[ 0.5, 0.0, 0.7], # noqa: E241, E201
[-0.25, 0.0, 0.9], # noqa: E241, E201
[ 0.26, 0.5, 0.8], # noqa: E241, E201
[ 0.76, 0.0, 0.8], # noqa: E241, E201
[-0.41, 0.0, 0.5], # noqa: E241, E201
[ 0.61, 0.6, 0.6], # noqa: E241, E201
[ 0.41, 0.0, 0.5], # noqa: E241, E201
[ -0.2, 0.0, -0.5], # noqa: E241, E201
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
requires_grad=True
)
faces1 = torch.tensor(
[
[ 1, 0, 2], # noqa: E241, E201
[ 4, 3, 5], # noqa: E241, E201
[ 7, 6, 8], # noqa: E241, E201
[10, 9, 11] # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.clone().detach()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
# Icosphere
meshes = ico_sphere(device=device)
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.detach().clone()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
def test_cpu_vs_cuda_naive(self):
"""
        Compare the naive CUDA and C++ implementations.
"""
torch.manual_seed(231)
image_size = 64
radius = 0.1 ** 2
faces_per_pixel = 3
device = torch.device("cpu")
meshes_cpu = ico_sphere(0, device)
verts1, faces1 = meshes_cpu.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes_cpu = Meshes(verts=[verts1], faces=[faces1])
device = torch.device("cuda:0")
meshes_cuda = ico_sphere(0, device)
verts2, faces2 = meshes_cuda.get_mesh_verts_faces(0)
verts2.requires_grad = True
meshes_cuda = Meshes(verts=[verts2], faces=[faces2])
args_cpu = (meshes_cpu, image_size, radius, faces_per_pixel)
args_cuda = (meshes_cuda, image_size, radius, faces_per_pixel, 0, 0)
self._compare_impls(
rasterize_meshes,
rasterize_meshes,
args_cpu,
args_cuda,
verts1,
verts2,
compare_grads=True,
)
def test_coarse_cpu(self):
return self._test_coarse_rasterize(torch.device("cpu"))
def test_coarse_cuda(self):
return self._test_coarse_rasterize(torch.device("cuda:0"))
def test_cpp_vs_cuda_naive_vs_cuda_binned(self):
# Make sure that the backward pass runs for all pathways
image_size = 64 # test is too slow for very large images.
N = 1
radius = 0.1 ** 2
faces_per_pixel = 3
grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)
grad_dist = torch.randn(N, image_size, image_size, faces_per_pixel)
grad_bary = torch.randn(N, image_size, image_size, faces_per_pixel, 3)
device = torch.device("cpu")
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
# Option I: CPU, naive
args = (meshes, image_size, radius, faces_per_pixel)
idx1, zbuf1, bary1, dist1 = rasterize_meshes(*args)
loss = (
(zbuf1 * grad_zbuf).sum()
+ (dist1 * grad_dist).sum()
+ (bary1 * grad_bary).sum()
)
loss.backward()
idx1 = idx1.data.cpu().clone()
zbuf1 = zbuf1.data.cpu().clone()
dist1 = dist1.data.cpu().clone()
grad1 = verts.grad.data.cpu().clone()
# Option II: CUDA, naive
device = torch.device("cuda:0")
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
args = (meshes, image_size, radius, faces_per_pixel, 0, 0)
idx2, zbuf2, bary2, dist2 = rasterize_meshes(*args)
grad_zbuf = grad_zbuf.cuda()
grad_dist = grad_dist.cuda()
grad_bary = grad_bary.cuda()
loss = (
(zbuf2 * grad_zbuf).sum()
+ (dist2 * grad_dist).sum()
+ (bary2 * grad_bary).sum()
)
loss.backward()
idx2 = idx2.data.cpu().clone()
zbuf2 = zbuf2.data.cpu().clone()
dist2 = dist2.data.cpu().clone()
grad2 = verts.grad.data.cpu().clone()
# Option III: CUDA, binned
device = torch.device("cuda:0")
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
args = (meshes, image_size, radius, faces_per_pixel, 32, 500)
idx3, zbuf3, bary3, dist3 = rasterize_meshes(*args)
loss = (
(zbuf3 * grad_zbuf).sum()
+ (dist3 * grad_dist).sum()
+ (bary3 * grad_bary).sum()
)
loss.backward()
idx3 = idx3.data.cpu().clone()
zbuf3 = zbuf3.data.cpu().clone()
dist3 = dist3.data.cpu().clone()
grad3 = verts.grad.data.cpu().clone()
# Make sure everything was the same
self.assertTrue((idx1 == idx2).all().item())
self.assertTrue((idx1 == idx3).all().item())
self.assertTrue(torch.allclose(zbuf1, zbuf2, atol=1e-6))
self.assertTrue(torch.allclose(zbuf1, zbuf3, atol=1e-6))
self.assertTrue(torch.allclose(dist1, dist2, atol=1e-6))
self.assertTrue(torch.allclose(dist1, dist3, atol=1e-6))
self.assertTrue(torch.allclose(grad1, grad2, rtol=5e-3)) # flaky test
self.assertTrue(torch.allclose(grad1, grad3, rtol=5e-3))
self.assertTrue(torch.allclose(grad2, grad3, rtol=5e-3))
def test_compare_coarse_cpu_vs_cuda(self):
torch.manual_seed(231)
N = 1
image_size = 512
blur_radius = 0.0
bin_size = 32
max_faces_per_bin = 20
device = torch.device("cpu")
meshes = ico_sphere(2, device)
faces = meshes.faces_packed()
verts = meshes.verts_packed()
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
args = (
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
bin_faces_cpu = _C._rasterize_meshes_coarse(*args)
device = torch.device("cuda:0")
meshes = ico_sphere(2, device)
faces = meshes.faces_packed()
verts = meshes.verts_packed()
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
args = (
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
bin_faces_cuda = _C._rasterize_meshes_coarse(*args)
        # The CUDA version may write the faces within each bin in any order,
        # so the outputs need not match elementwise. After sorting the
        # non-(-1) entries of each CUDA bin, they should be identical.
for n in range(N):
for by in range(bin_faces_cpu.shape[1]):
for bx in range(bin_faces_cpu.shape[2]):
K = (bin_faces_cuda[n, by, bx] != -1).sum().item()
idxs_cpu = bin_faces_cpu[n, by, bx].tolist()
idxs_cuda = bin_faces_cuda[n, by, bx].tolist()
idxs_cuda[:K] = sorted(idxs_cuda[:K])
self.assertEqual(idxs_cpu, idxs_cuda)
def test_python_vs_cpp_perspective_correct(self):
torch.manual_seed(232)
N = 2
V = 10
F = 5
verts1 = torch.randn(N, V, 3, requires_grad=True)
verts2 = verts1.detach().clone().requires_grad_(True)
faces = torch.randint(V, size=(N, F, 3))
meshes1 = Meshes(verts1, faces)
meshes2 = Meshes(verts2, faces)
kwargs = {"image_size": 24, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
def test_cpp_vs_cuda_perspective_correct(self):
meshes = ico_sphere(2, device=torch.device("cpu"))
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes1 = Meshes(verts=[verts1], faces=[faces1])
verts2 = verts1.detach().cuda().requires_grad_(True)
faces2 = faces1.detach().clone().cuda()
meshes2 = Meshes(verts=[verts2], faces=[faces2])
kwargs = {"image_size": 64, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
def test_cuda_naive_vs_binned_perspective_correct(self):
meshes = ico_sphere(2, device=torch.device("cuda"))
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes1 = Meshes(verts=[verts1], faces=[faces1])
verts2 = verts1.detach().clone().requires_grad_(True)
faces2 = faces1.detach().clone()
meshes2 = Meshes(verts=[verts2], faces=[faces2])
kwargs = {"image_size": 64, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, bin_size=0, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=8, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
def _compare_impls(
self,
fn1,
fn2,
args1,
args2,
grad_var1=None,
grad_var2=None,
compare_grads=False,
):
idx1, zbuf1, bary1, dist1 = fn1(*args1)
idx2, zbuf2, bary2, dist2 = fn2(*args2)
self.assertTrue((idx1.cpu() == idx2.cpu()).all().item())
self.assertTrue(torch.allclose(zbuf1.cpu(), zbuf2.cpu(), rtol=1e-4))
self.assertTrue(torch.allclose(dist1.cpu(), dist2.cpu(), rtol=6e-3))
self.assertTrue(torch.allclose(bary1.cpu(), bary2.cpu(), rtol=1e-3))
if not compare_grads:
return
# Compare gradients.
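        # Backprop the same random linear functional of the outputs through
        # both implementations; equal input gradients then mean the two
        # backward passes agree for this particular cotangent vector.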
torch.manual_seed(231)
grad_zbuf = torch.randn_like(zbuf1)
grad_dist = torch.randn_like(dist1)
grad_bary = torch.randn_like(bary1)
loss1 = (
(dist1 * grad_dist).sum()
+ (zbuf1 * grad_zbuf).sum()
+ (bary1 * grad_bary).sum()
)
loss1.backward()
grad_verts1 = grad_var1.grad.data.clone().cpu()
grad_zbuf = grad_zbuf.to(zbuf2)
grad_dist = grad_dist.to(dist2)
grad_bary = grad_bary.to(bary2)
loss2 = (
(dist2 * grad_dist).sum()
+ (zbuf2 * grad_zbuf).sum()
+ (bary2 * grad_bary).sum()
)
grad_var1.grad.data.zero_()
loss2.backward()
grad_verts2 = grad_var2.grad.data.clone().cpu()
self.assertTrue(torch.allclose(grad_verts1, grad_verts2, rtol=1e-3))
def _test_perspective_correct(
self, rasterize_meshes_fn, device, bin_size=None
):
# fmt: off
verts = torch.tensor([
[-0.4, -0.4, 10], # noqa: E241, E201
[ 0.4, -0.4, 10], # noqa: E241, E201
[ 0.0, 0.4, 20], # noqa: E241, E201
], dtype=torch.float32, device=device)
# fmt: on
faces = torch.tensor([[0, 1, 2]], device=device)
meshes = Meshes(verts=[verts], faces=[faces])
kwargs = {
"meshes": meshes,
"image_size": 11,
"faces_per_pixel": 1,
"blur_radius": 0.2,
"perspective_correct": False,
}
if bin_size != -1:
kwargs["bin_size"] = bin_size
# Run with and without perspective correction
idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)
kwargs["perspective_correct"] = True
idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)
        # idx and dists should be the same with or without perspective correction
# fmt: off
idx_expected = torch.tensor([
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
], dtype=torch.int64, device=device).view(1, 11, 11, 1)
dists_expected = torch.tensor([
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 0.1283, 0.1071, 0.1071, 0.1071, 0.1071, 0.1071, 0.1283, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, 0.1283, 0.0423, 0.0212, 0.0212, 0.0212, 0.0212, 0.0212, 0.0423, 0.1283, -1.0000], # noqa: E241, E201
[-1.0000, 0.1084, 0.0225, -0.0003, -0.0013, -0.0013, -0.0013, -0.0003, 0.0225, 0.1084, -1.0000], # noqa: E241, E201
[-1.0000, 0.1523, 0.0518, 0.0042, -0.0095, -0.0476, -0.0095, 0.0042, 0.0518, 0.1523, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 0.0955, 0.0214, -0.0003, -0.0320, -0.0003, 0.0214, 0.0955, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 0.1523, 0.0518, 0.0042, -0.0095, 0.0042, 0.0518, 0.1523, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 0.0955, 0.0214, -0.0003, 0.0214, 0.0955, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 0.1523, 0.0542, 0.0212, 0.0542, 0.1523, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, 0.1402, 0.1071, 0.1402, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
# zbuf and barycentric will be different with perspective correction
zbuf_f_expected = torch.tensor([
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, -1.0000], # noqa: E241, E201
[-1.0000, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, -1.0000], # noqa: E241, E201
[-1.0000, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 15.0000, 15.0000, 15.0000, 15.0000, 15.0000, 15.0000, 15.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 19.5455, 19.5455, 19.5455, 19.5455, 19.5455, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 21.8182, 21.8182, 21.8182, 21.8182, 21.8182, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, 24.0909, 24.0909, 24.0909, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
zbuf_t_expected = torch.tensor([
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, -1.0000], # noqa: E241, E201
[-1.0000, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, -1.0000], # noqa: E241, E201
[-1.0000, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 19.1304, 19.1304, 19.1304, 19.1304, 19.1304, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, 24.4444, 24.4444, 24.4444, 24.4444, 24.4444, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, 33.8462, 33.8462, 33.8461, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000, -1.0000], # noqa: E241, E201
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
# fmt: on
self.assertTrue(torch.all(idx_f == idx_expected).item())
self.assertTrue(torch.all(idx_t == idx_expected).item())
dists_t_max_diff = (dists_t - dists_expected).abs().max().item()
dists_f_max_diff = (dists_f - dists_expected).abs().max().item()
self.assertLess(dists_t_max_diff, 1e-4)
self.assertLess(dists_f_max_diff, 1e-4)
zbuf_f_max_diff = (zbuf_f - zbuf_f_expected).abs().max().item()
zbuf_t_max_diff = (zbuf_t - zbuf_t_expected).abs().max().item()
self.assertLess(zbuf_f_max_diff, 1e-4)
self.assertLess(zbuf_t_max_diff, 1e-4)
# Check barycentrics by using them to re-compute zbuf
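        # Interpolating vertex z with each mode's barycentric weights should
        # reproduce that mode's zbuf wherever a face was hit, so bary and
        # zbuf are checked for mutual consistency.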
z0 = verts[0, 2]
z1 = verts[1, 2]
z2 = verts[2, 2]
w0_f, w1_f, w2_f = bary_f.unbind(dim=4)
w0_t, w1_t, w2_t = bary_t.unbind(dim=4)
zbuf_f_bary = w0_f * z0 + w1_f * z1 + w2_f * z2
zbuf_t_bary = w0_t * z0 + w1_t * z1 + w2_t * z2
mask = idx_expected != -1
zbuf_f_bary_diff = (
(zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max()
)
zbuf_t_bary_diff = (
(zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max()
)
self.assertLess(zbuf_f_bary_diff, 1e-4)
self.assertLess(zbuf_t_bary_diff, 1e-4)
def _test_behind_camera(self, rasterize_meshes_fn, device, bin_size=None):
"""
All verts are behind the camera so nothing should get rasterized.
"""
N = 1
# fmt: off
verts = torch.tensor(
[
[ -0.5, 0.0, -0.1], # noqa: E241, E201
[ 0.0, 0.6, -0.1], # noqa: E241, E201
[ 0.5, 0.0, -0.1], # noqa: E241, E201
[-0.25, 0.0, -0.9], # noqa: E241, E201
[ 0.25, 0.5, -0.9], # noqa: E241, E201
[ 0.75, 0.0, -0.9], # noqa: E241, E201
[ -0.4, 0.0, -0.5], # noqa: E241, E201
[ 0.6, 0.6, -0.5], # noqa: E241, E201
[ 0.8, 0.0, -0.5], # noqa: E241, E201
[ -0.2, 0.0, -0.5], # noqa: E241, E201
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
faces = torch.tensor(
[[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],
dtype=torch.int64,
device=device,
)
meshes = Meshes(verts=[verts], faces=[faces])
image_size = 16
faces_per_pixel = 1
radius = 0.2
idx_expected = torch.full(
(N, image_size, image_size, faces_per_pixel),
fill_value=-1,
dtype=torch.int64,
device=device,
)
bary_expected = torch.full(
(N, image_size, image_size, faces_per_pixel, 3),
fill_value=-1,
dtype=torch.float32,
device=device,
)
zbuf_expected = torch.full(
(N, image_size, image_size, faces_per_pixel),
fill_value=-1,
dtype=torch.float32,
device=device,
)
dists_expected = zbuf_expected.clone()
if bin_size == -1:
# naive python version with no binning
idx, zbuf, bary, dists = rasterize_meshes_fn(
meshes, image_size, radius, faces_per_pixel
)
else:
idx, zbuf, bary, dists = rasterize_meshes_fn(
meshes, image_size, radius, faces_per_pixel, bin_size
)
idx_same = (idx == idx_expected).all().item()
zbuf_same = (zbuf == zbuf_expected).all().item()
self.assertTrue(idx_same)
self.assertTrue(zbuf_same)
self.assertTrue(torch.allclose(bary, bary_expected))
self.assertTrue(torch.allclose(dists, dists_expected))
def _simple_triangle_raster(self, raster_fn, device, bin_size=None):
image_size = 10
# Mesh with a single face.
verts0 = torch.tensor(
[[-0.7, -0.4, 0.1], [0.0, 0.6, 0.1], [0.7, -0.4, 0.1]],
dtype=torch.float32,
device=device,
)
faces0 = torch.tensor([[1, 0, 2]], dtype=torch.int64, device=device)
# Mesh with two overlapping faces.
# fmt: off
verts1 = torch.tensor(
[
[-0.7, -0.4, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.1], # noqa: E241, E201
[ 0.7, -0.4, 0.1], # noqa: E241, E201
[-0.7, 0.4, 0.5], # noqa: E241, E201
[ 0.0, -0.6, 0.5], # noqa: E241, E201
[ 0.7, 0.4, 0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
        # fmt: on
faces1 = torch.tensor(
[[1, 0, 2], [3, 4, 5]], dtype=torch.int64, device=device
)
        # fmt: off
expected_p2face_k0 = torch.tensor(
[
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 1, 1, 1, 1, 1, 1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 1, 1, 1, 1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 1, 1, 1, 1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 2, 2, 1, 1, 2, 2, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
],
dtype=torch.int64,
device=device,
)
expected_zbuf_k0 = torch.tensor(
[
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.5, 0.5, 0.1, 0.1, 0.5, 0.5, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
],
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts0, verts1], faces=[faces0, faces1])
if bin_size == -1:
# simple python case with no binning
p2face, zbuf, bary, pix_dists = raster_fn(
meshes, image_size, 0.0, 2
)
else:
p2face, zbuf, bary, pix_dists = raster_fn(
meshes, image_size, 0.0, 2, bin_size
)
# k = 0, closest point.
self.assertTrue(torch.allclose(p2face[..., 0], expected_p2face_k0))
self.assertTrue(torch.allclose(zbuf[..., 0], expected_zbuf_k0))
# k = 1, second closest point.
expected_p2face_k1 = expected_p2face_k0.clone()
expected_p2face_k1[0, :] = (
torch.ones_like(expected_p2face_k1[0, :]) * -1
)
# fmt: off
expected_p2face_k1[1, :] = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
expected_zbuf_k1 = expected_zbuf_k0.clone()
expected_zbuf_k1[0, :] = torch.ones_like(expected_zbuf_k1[0, :]) * -1
expected_zbuf_k1[1, :] = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0.5, 0.5, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.5, 0.5, 0.5, 0.5, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.5, 0.5, 0.5, 0.5, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0.5, 0.5, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
self.assertTrue(torch.allclose(p2face[..., 1], expected_p2face_k1))
self.assertTrue(torch.allclose(zbuf[..., 1], expected_zbuf_k1))
def _simple_blurry_raster(self, raster_fn, device, bin_size=None):
"""
Check that pix_to_face, dist and zbuf values are invariant to the
ordering of faces.
"""
image_size = 10
blur_radius = 0.12 ** 2
faces_per_pixel = 1
# fmt: off
verts = torch.tensor(
[
[ -0.5, 0.0, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.1], # noqa: E241, E201
[ 0.5, 0.0, 0.1], # noqa: E241, E201
[-0.25, 0.0, 0.9], # noqa: E241, E201
[0.25, 0.5, 0.9], # noqa: E241, E201
[0.75, 0.0, 0.9], # noqa: E241, E201
[-0.4, 0.0, 0.5], # noqa: E241, E201
[ 0.6, 0.6, 0.5], # noqa: E241, E201
[ 0.8, 0.0, 0.5], # noqa: E241, E201
[-0.2, 0.0, -0.5], # noqa: E241, E201 face behind the camera
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
faces_packed = torch.tensor(
[[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],
dtype=torch.int64,
device=device,
)
expected_p2f = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 2, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 2, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, 2, 2, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0, 0, 2, 2, 2, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
expected_zbuf = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.5, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.5, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.5, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0.1, 0.1, 0.5, 0.5, 0.5, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
for i, order in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):
faces = faces_packed[order] # rearrange order of faces.
mesh = Meshes(verts=[verts], faces=[faces])
if bin_size == -1:
# simple python case with no binning
pix_to_face, zbuf, bary_coords, dists = raster_fn(
mesh, image_size, blur_radius, faces_per_pixel
)
else:
pix_to_face, zbuf, bary_coords, dists = raster_fn(
mesh, image_size, blur_radius, faces_per_pixel, bin_size
)
if i == 0:
expected_dists = dists
p2f = expected_p2f.clone()
p2f[expected_p2f == 0] = order.index(0)
p2f[expected_p2f == 1] = order.index(1)
p2f[expected_p2f == 2] = order.index(2)
self.assertTrue(torch.allclose(pix_to_face.squeeze(), p2f))
self.assertTrue(
torch.allclose(zbuf.squeeze(), expected_zbuf, rtol=1e-5)
)
self.assertTrue(torch.allclose(dists, expected_dists))
def _test_coarse_rasterize(self, device):
image_size = 16
blur_radius = 0.2 ** 2
bin_size = 8
max_faces_per_bin = 3
# fmt: off
verts = torch.tensor(
[
[-0.5, 0.0, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.1], # noqa: E241, E201
[ 0.5, 0.0, 0.1], # noqa: E241, E201
[-0.3, 0.0, 0.4], # noqa: E241, E201
[ 0.3, 0.5, 0.4], # noqa: E241, E201
[0.75, 0.0, 0.4], # noqa: E241, E201
[-0.4, -0.3, 0.9], # noqa: E241, E201
[ 0.2, -0.7, 0.9], # noqa: E241, E201
[ 0.4, -0.3, 0.9], # noqa: E241, E201
[-0.4, 0.0, -1.5], # noqa: E241, E201
[ 0.6, 0.6, -1.5], # noqa: E241, E201
[ 0.8, 0.0, -1.5], # noqa: E241, E201
],
device=device,
)
faces = torch.tensor(
[
[ 1, 0, 2], # noqa: E241, E201 bin 00 and bin 01
[ 4, 3, 5], # noqa: E241, E201 bin 00 and bin 01
[ 7, 6, 8], # noqa: E241, E201 bin 10 and bin 11
[10, 9, 11], # noqa: E241, E201 negative z, should not appear.
],
dtype=torch.int64,
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts], faces=[faces])
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
bin_faces_expected = (
torch.ones(
(1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device
)
* -1
)
bin_faces_expected[0, 0, 0, 0:2] = torch.tensor([0, 1])
bin_faces_expected[0, 0, 1, 0:2] = torch.tensor([0, 1])
bin_faces_expected[0, 1, 0, 0:3] = torch.tensor([0, 1, 2])
bin_faces_expected[0, 1, 1, 0:3] = torch.tensor([0, 1, 2])
bin_faces = _C._rasterize_meshes_coarse(
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
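        # Flip the y axis of the output so the bin rows line up with the
        # expected top-down layout above (the kernel appears to index bins
        # from the bottom of the image).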
bin_faces_same = (
bin_faces.squeeze().flip(dims=[0]) == bin_faces_expected
).all()
self.assertTrue(bin_faces_same.item() == 1)

    @staticmethod
    def rasterize_meshes_python_with_init(
        num_meshes: int, ico_level: int, image_size: int, blur_radius: float
    ):
        device = torch.device("cpu")
        meshes = ico_sphere(ico_level, device)
        meshes_batch = meshes.extend(num_meshes)

        def rasterize():
            rasterize_meshes_python(meshes_batch, image_size, blur_radius)

        return rasterize

    @staticmethod
    def rasterize_meshes_cpu_with_init(
        num_meshes: int, ico_level: int, image_size: int, blur_radius: float
    ):
        meshes = ico_sphere(ico_level, torch.device("cpu"))
        meshes_batch = meshes.extend(num_meshes)

        def rasterize():
            rasterize_meshes(meshes_batch, image_size, blur_radius, bin_size=0)

        return rasterize

    @staticmethod
    def rasterize_meshes_cuda_with_init(
        num_meshes: int,
        ico_level: int,
        image_size: int,
        blur_radius: float,
        bin_size: int,
        max_faces_per_bin: int,
    ):
        meshes = ico_sphere(ico_level, torch.device("cuda:0"))
        meshes_batch = meshes.extend(num_meshes)
        # Wait for mesh construction to finish on the GPU before timing.
        torch.cuda.synchronize()

        def rasterize():
            rasterize_meshes(
                meshes_batch,
                image_size,
                blur_radius,
                8,  # faces_per_pixel
                bin_size,
                max_faces_per_bin,
            )
            # Synchronize so the closure's runtime reflects the kernel time.
            torch.cuda.synchronize()

        return rasterize
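
# A minimal sketch of a harness that could drive the *_with_init helpers
# above; `simple_timer` is hypothetical and not part of this test suite.
import time

def simple_timer(fn, warmup: int = 3, iters: int = 10) -> float:
    """Return the mean wall-clock time per call of ``fn`` in seconds."""
    for _ in range(warmup):
        fn()  # warm up allocator caches and CUDA kernels before timing
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return (time.perf_counter() - start) / iters

# Example (assuming the enclosing test class is importable by name):
# fn = TestRasterizeMeshes.rasterize_meshes_cpu_with_init(1, 1, 64, 0.0)
# print(f"{simple_timer(fn):.4f} s per call")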
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
from pathlib import Path
import torch
from PIL import Image
from pytorch3d.renderer.cameras import (
OpenGLPerspectiveCameras,
look_at_view_transform,
)
from pytorch3d.renderer.mesh.rasterizer import (
MeshRasterizer,
RasterizationSettings,
)
from pytorch3d.utils.ico_sphere import ico_sphere

DATA_DIR = Path(__file__).resolve().parent / "data"
DEBUG = False  # Set DEBUG to True to save outputs from the tests.

def convert_image_to_binary_mask(filename):
    with Image.open(filename) as raw_image:
        # Work in float so the normalization below is not integer division.
        image = torch.from_numpy(np.array(raw_image)).to(torch.float32)
    # Rescale to the [0, 1] range, then set every nonzero pixel to 1.
    min_val = image.min()
    max_val = image.max()
    image_norm = (image - min_val) / (max_val - min_val)
    image_norm[image_norm > 0] = 1.0
    image_norm = image_norm.to(torch.int64)
    return image_norm
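
# Example: the reference mask used in the first test below can be produced by
# convert_image_to_binary_mask(DATA_DIR / "test_rasterized_sphere.png").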

class TestMeshRasterizer(unittest.TestCase):
def test_simple_sphere(self):
device = torch.device("cuda:0")
ref_filename = "test_rasterized_sphere.png"
image_ref_filename = DATA_DIR / ref_filename
# Rescale image_ref to the 0 - 1 range and convert to a binary mask.
image_ref = convert_image_to_binary_mask(image_ref_filename)
# Init mesh
sphere_mesh = ico_sphere(5, device)
# Init rasterizer settings
R, T = look_at_view_transform(2.7, 0, 0)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
raster_settings = RasterizationSettings(
image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
)
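        # bin_size=0 forces the naive rasterization path with no
        # coarse-to-fine binning.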
# Init rasterizer
rasterizer = MeshRasterizer(
cameras=cameras, raster_settings=raster_settings
)
####################################
# 1. Test rasterizing a single mesh
####################################
fragments = rasterizer(sphere_mesh)
image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
# Convert pix_to_face to a binary mask
image[image >= 0] = 1.0
image[image < 0] = 0.0
if DEBUG:
Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
DATA_DIR / "DEBUG_test_rasterized_sphere.png"
)
self.assertTrue(torch.allclose(image, image_ref))
##################################
# 2. Test with a batch of meshes
##################################
batch_size = 10
sphere_meshes = sphere_mesh.extend(batch_size)
fragments = rasterizer(sphere_meshes)
for i in range(batch_size):
image = fragments.pix_to_face[i, ..., 0].squeeze().cpu()
image[image >= 0] = 1.0
image[image < 0] = 0.0
self.assertTrue(torch.allclose(image, image_ref))
####################################################
# 3. Test that passing kwargs to rasterizer works.
####################################################
# Change the view transform to zoom in.
R, T = look_at_view_transform(2.0, 0, 0, device=device)
fragments = rasterizer(sphere_mesh, R=R, T=T)
image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
image[image >= 0] = 1.0
image[image < 0] = 0.0
ref_filename = "test_rasterized_sphere_zoom.png"
image_ref_filename = DATA_DIR / ref_filename
image_ref = convert_image_to_binary_mask(image_ref_filename)
if DEBUG:
Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
DATA_DIR / "DEBUG_test_rasterized_sphere_zoom.png"
)
self.assertTrue(torch.allclose(image, image_ref))