Commit dbf06b50 authored by facebook-github-bot

Initial commit

fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
Sanity checks for output images from the renderer.
"""
import numpy as np
import unittest
from pathlib import Path
import torch
from PIL import Image
from pytorch3d.io import load_obj
from pytorch3d.renderer.cameras import (
OpenGLPerspectiveCameras,
look_at_view_transform,
)
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.materials import Materials
from pytorch3d.renderer.mesh.rasterizer import (
MeshRasterizer,
RasterizationSettings,
)
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import (
BlendParams,
GouradShader,
PhongShader,
SilhouetteShader,
TexturedPhongShader,
)
from pytorch3d.renderer.mesh.texturing import Textures
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere
# Save out images generated in the tests for debugging
# All saved images have prefix DEBUG_
DEBUG = False
DATA_DIR = Path(__file__).resolve().parent / "data"


def load_rgb_image(filename, data_dir=DATA_DIR):
    """
    Load an image file and return its RGB channels as a float32 tensor.

    Pixel values are rescaled from [0, 255] to [0.0, 1.0] and any alpha
    channel is dropped.

    Args:
        filename: name of the image file inside `data_dir`.
        data_dir: directory containing the image (defaults to the test
            data folder next to this file).

    Returns:
        (H, W, 3) float32 tensor of RGB values in [0, 1].
    """
    with Image.open(data_dir / filename) as raw_image:
        pixels = np.array(raw_image) / 255.0
    rgb = torch.from_numpy(pixels).to(dtype=torch.float32)
    return rgb[..., :3]
class TestRenderingMeshes(unittest.TestCase):
    """End-to-end checks that rendered images match stored reference images."""

    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gourad shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene
                should have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh: an ico-sphere with a constant white per-vertex texture.
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        if elevated_camera:
            R, T = look_at_view_transform(2.7, 45.0, 0.0)
            postfix = "_elevated_camera"
        else:
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        # Light in front of the object (negative z, same side as the camera).
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        )
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=PhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light%s.png" % postfix
            )

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_illuminated%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ###################################
        # Move the light behind the object
        ###################################
        # Check the image is dark
        lights.location[..., 2] = +2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_dark%s.png" % postfix
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouradShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=GouradShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light_gourad%s.png" % postfix
            )

        # Load reference image
        image_ref_gourad = load_rgb_image(
            "test_simple_sphere_light_gourad%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_gourad, atol=0.005))
        # At this tight tolerance Gourad shading must differ from Phong.
        self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))

    def test_simple_sphere_elevated_camera(self):
        """
        Test output of phong and gourad shading matches a reference image using
        the default values for the light sources.
        The rendering is performed with a camera that has non-zero elevation.
        """
        self.test_simple_sphere(elevated_camera=True)

    def test_simple_sphere_batched(self):
        """
        Test output of phong shading matches a reference image using
        the default values for the light sources.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh: a batch of identical white spheres.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings with an identical camera per batch element.
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=PhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_meshes)

        # Every batch element should match the single-sphere reference image.
        image_ref = load_rgb_image("test_simple_sphere_illuminated.png")
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"DEBUG_simple_sphere_{i}.png"
                )
            self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    def test_silhouette_with_grad(self):
        """
        Test silhouette blending. Also check that gradient calculation works.
        """
        device = torch.device("cuda:0")
        ref_filename = "test_silhouette.png"
        image_ref_filename = DATA_DIR / ref_filename
        sphere_mesh = ico_sphere(5, device)
        verts, faces = sphere_mesh.get_mesh_verts_faces(0)
        sphere_mesh = Meshes(verts=[verts], faces=[faces])

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        # blur_radius sized so the sigmoid blend covers the silhouette edge.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=80,
            bin_size=0,
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=SilhouetteShader(blend_params=blend_params),
        )
        images = renderer(sphere_mesh)
        # Only the alpha channel matters for a silhouette.
        alpha = images[0, ..., 3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_silhouette_grad.png"
            )

        with Image.open(image_ref_filename) as raw_image_ref:
            image_ref = torch.from_numpy(np.array(raw_image_ref))
        image_ref = image_ref.to(dtype=torch.float32) / 255.0
        self.assertTrue(torch.allclose(alpha, image_ref, atol=0.055))

        # Check grad exist
        verts.requires_grad = True
        sphere_mesh = Meshes(verts=[verts], faces=[faces])
        images = renderer(sphere_mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        # Renamed from DATA_DIR: the local name previously shadowed the
        # module-level DATA_DIR constant.
        tutorial_data_dir = (
            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        )
        obj_filename = tutorial_data_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename)
        faces_idx = faces.verts_idx.to(device)
        verts = verts.to(device)
        texture_uvs = aux.verts_uvs
        tex_maps = aux.texture_images
        # tex_maps is a dictionary of material names as keys and texture images
        # as values. Only need the images for this example.
        textures = Textures(
            maps=list(tex_maps.values()),
            faces_uvs=faces.textures_idx.to(torch.int64).to(device)[None, :],
            verts_uvs=texture_uvs.to(torch.float32).to(device)[None, :],
        )
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)

        # Init rasterizer settings. (Previously constructed twice with
        # identical arguments; once is enough.)
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings. A dead `materials = aux.material_colors`
        # assignment was removed: the shader uses default Materials.
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=TexturedPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                tutorial_data_dir / "DEBUG_texture_map.png"
            )

        # There's a calculation instability on the corner of the ear of the cow.
        # We ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0
        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        verts = verts.clone()
        verts.requires_grad = True
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)
        images = renderer(mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import math
import unittest
import torch
from pytorch3d.transforms.rotation_conversions import (
euler_angles_to_matrix,
matrix_to_euler_angles,
matrix_to_quaternion,
quaternion_apply,
quaternion_multiply,
quaternion_to_matrix,
random_quaternions,
random_rotation,
random_rotations,
)
class TestRandomRotation(unittest.TestCase):
    """Statistical sanity checks for the random rotation generators."""

    def setUp(self) -> None:
        super().setUp()
        # Fixed seed so the chi-square check below is deterministic.
        torch.manual_seed(1)

    def test_random_rotation_invariant(self):
        """The image of the x-axis isn't biased among quadrants."""
        n_samples = 1000
        base = random_rotation()
        octants = list(itertools.product([False, True], repeat=3))
        matrices = random_rotations(n_samples)
        # Check the raw samples and both left/right compositions with a
        # fixed rotation: the octant distribution should be invariant.
        candidates = [
            matrices,
            torch.matmul(base, matrices),
            torch.matmul(matrices, base),
        ]
        expected = n_samples / 8.0
        for k, results in enumerate(candidates):
            tallies = dict.fromkeys(octants, 0)
            for row in results[:, 0]:
                key = tuple(value.item() > 0 for value in row)
                tallies[key] += 1
            observed = torch.tensor(list(tallies.values()))
            deviation = observed - expected
            statistic = torch.sum(deviation * deviation / expected)
            # The 0.1 significance level for chisquare(8-1) is
            # scipy.stats.chi2(7).ppf(0.9) == 12.017.
            self.assertLess(statistic, 12, (tallies, statistic, k))
class TestRotationConversion(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
def test_from_quat(self):
"""quat -> mtx -> quat"""
data = random_quaternions(13, dtype=torch.float64)
mdata = matrix_to_quaternion(quaternion_to_matrix(data))
self.assertTrue(torch.allclose(data, mdata))
def test_to_quat(self):
"""mtx -> quat -> mtx"""
data = random_rotations(13, dtype=torch.float64)
mdata = quaternion_to_matrix(matrix_to_quaternion(data))
self.assertTrue(torch.allclose(data, mdata))
def test_quat_grad_exists(self):
"""Quaternion calculations are differentiable."""
rotation = random_rotation(requires_grad=True)
modified = quaternion_to_matrix(matrix_to_quaternion(rotation))
[g] = torch.autograd.grad(modified.sum(), rotation)
self.assertTrue(torch.isfinite(g).all())
def _tait_bryan_conventions(self):
return map("".join, itertools.permutations("XYZ"))
def _proper_euler_conventions(self):
letterpairs = itertools.permutations("XYZ", 2)
return (l0 + l1 + l0 for l0, l1 in letterpairs)
def _all_euler_angle_conventions(self):
return itertools.chain(
self._tait_bryan_conventions(), self._proper_euler_conventions()
)
def test_conventions(self):
"""The conventions listings have the right length."""
all = list(self._all_euler_angle_conventions())
self.assertEqual(len(all), 12)
self.assertEqual(len(set(all)), 12)
def test_from_euler(self):
"""euler -> mtx -> euler"""
n_repetitions = 10
# tolerance is how much we keep the middle angle away from the extreme
# allowed values which make the calculation unstable (Gimbal lock).
tolerance = 0.04
half_pi = math.pi / 2
data = torch.zeros(n_repetitions, 3)
data.uniform_(-math.pi, math.pi)
data[:, 1].uniform_(-half_pi + tolerance, half_pi - tolerance)
for convention in self._tait_bryan_conventions():
matrices = euler_angles_to_matrix(data, convention)
mdata = matrix_to_euler_angles(matrices, convention)
self.assertTrue(torch.allclose(data, mdata))
data[:, 1] += half_pi
for convention in self._proper_euler_conventions():
matrices = euler_angles_to_matrix(data, convention)
mdata = matrix_to_euler_angles(matrices, convention)
self.assertTrue(torch.allclose(data, mdata))
def test_to_euler(self):
"""mtx -> euler -> mtx"""
data = random_rotations(13, dtype=torch.float64)
for convention in self._all_euler_angle_conventions():
euler_angles = matrix_to_euler_angles(data, convention)
mdata = euler_angles_to_matrix(euler_angles, convention)
self.assertTrue(torch.allclose(data, mdata))
def test_euler_grad_exists(self):
"""Euler angle calculations are differentiable."""
rotation = random_rotation(dtype=torch.float64, requires_grad=True)
for convention in self._all_euler_angle_conventions():
euler_angles = matrix_to_euler_angles(rotation, convention)
mdata = euler_angles_to_matrix(euler_angles, convention)
[g] = torch.autograd.grad(mdata.sum(), rotation)
self.assertTrue(torch.isfinite(g).all())
def test_quaternion_multiplication(self):
"""Quaternion and matrix multiplication are equivalent."""
a = random_quaternions(15, torch.float64).reshape((3, 5, 4))
b = random_quaternions(21, torch.float64).reshape((7, 3, 1, 4))
ab = quaternion_multiply(a, b)
self.assertEqual(ab.shape, (7, 3, 5, 4))
a_matrix = quaternion_to_matrix(a)
b_matrix = quaternion_to_matrix(b)
ab_matrix = torch.matmul(a_matrix, b_matrix)
ab_from_matrix = matrix_to_quaternion(ab_matrix)
self.assertEqual(ab.shape, ab_from_matrix.shape)
self.assertTrue(torch.allclose(ab, ab_from_matrix))
def test_quaternion_application(self):
"""Applying a quaternion is the same as applying the matrix."""
quaternions = random_quaternions(3, torch.float64, requires_grad=True)
matrices = quaternion_to_matrix(quaternions)
points = torch.randn(3, 3, dtype=torch.float64, requires_grad=True)
transform1 = quaternion_apply(quaternions, points)
transform2 = torch.matmul(matrices, points[..., None])[..., 0]
self.assertTrue(torch.allclose(transform1, transform2))
[p, q] = torch.autograd.grad(transform1.sum(), [points, quaternions])
self.assertTrue(torch.isfinite(p).all())
self.assertTrue(torch.isfinite(q).all())
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from pathlib import Path
import torch
from pytorch3d import _C
from pytorch3d.ops.sample_points_from_meshes import sample_points_from_meshes
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere
class TestSamplePoints(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
@staticmethod
def init_meshes(
num_meshes: int = 10,
num_verts: int = 1000,
num_faces: int = 3000,
device: str = "cpu",
):
device = torch.device(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3), dtype=torch.float32, device=device
)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
return meshes
def test_all_empty_meshes(self):
"""
Check sample_points_from_meshes raises an exception if all meshes are
invalid.
"""
device = torch.device("cuda:0")
verts1 = torch.tensor([], dtype=torch.float32, device=device)
faces1 = torch.tensor([], dtype=torch.int64, device=device)
meshes = Meshes(
verts=[verts1, verts1, verts1], faces=[faces1, faces1, faces1]
)
with self.assertRaises(ValueError) as err:
sample_points_from_meshes(
meshes, num_samples=100, return_normals=True
)
self.assertTrue("Meshes are empty." in str(err.exception))
def test_sampling_output(self):
"""
Check outputs of sampling are correct for different meshes.
For an ico_sphere, the sampled vertices should lie on a unit sphere.
For an empty mesh, the samples and normals should be 0.
"""
device = torch.device("cuda:0")
# Unit simplex.
verts_pyramid = torch.tensor(
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
],
dtype=torch.float32,
device=device,
)
faces_pyramid = torch.tensor(
[[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 2, 3]],
dtype=torch.int64,
device=device,
)
sphere_mesh = ico_sphere(9, device)
verts_sphere, faces_sphere = sphere_mesh.get_mesh_verts_faces(0)
verts_empty = torch.tensor([], dtype=torch.float32, device=device)
faces_empty = torch.tensor([], dtype=torch.int64, device=device)
num_samples = 10
meshes = Meshes(
verts=[verts_empty, verts_sphere, verts_pyramid],
faces=[faces_empty, faces_sphere, faces_pyramid],
)
samples, normals = sample_points_from_meshes(
meshes, num_samples=num_samples, return_normals=True
)
samples = samples.cpu()
normals = normals.cpu()
self.assertEqual(samples.shape, (3, num_samples, 3))
self.assertEqual(normals.shape, (3, num_samples, 3))
# Empty meshes: should have all zeros for samples and normals.
self.assertTrue(
torch.allclose(samples[0, :], torch.zeros((1, num_samples, 3)))
)
self.assertTrue(
torch.allclose(normals[0, :], torch.zeros((1, num_samples, 3)))
)
# Sphere: points should have radius 1.
x, y, z = samples[1, :].unbind(1)
radius = torch.sqrt(x ** 2 + y ** 2 + z ** 2)
self.assertTrue(torch.allclose(radius, torch.ones((num_samples))))
# Pyramid: points shoudl lie on one of the faces.
pyramid_verts = samples[2, :]
pyramid_normals = normals[2, :]
self.assertTrue(
torch.allclose(
pyramid_verts.lt(1).float(), torch.ones_like(pyramid_verts)
)
)
self.assertTrue(
torch.allclose(
(pyramid_verts >= 0).float(), torch.ones_like(pyramid_verts)
)
)
# Face 1: z = 0, x + y <= 1, normals = (0, 0, 1).
face_1_idxs = pyramid_verts[:, 2] == 0
face_1_verts, face_1_normals = (
pyramid_verts[face_1_idxs, :],
pyramid_normals[face_1_idxs, :],
)
self.assertTrue(
torch.all((face_1_verts[:, 0] + face_1_verts[:, 1]) <= 1)
)
self.assertTrue(
torch.allclose(
face_1_normals,
torch.tensor([0, 0, 1], dtype=torch.float32).expand(
face_1_normals.size()
),
)
)
# Face 2: x = 0, z + y <= 1, normals = (1, 0, 0).
face_2_idxs = pyramid_verts[:, 0] == 0
face_2_verts, face_2_normals = (
pyramid_verts[face_2_idxs, :],
pyramid_normals[face_2_idxs, :],
)
self.assertTrue(
torch.all((face_2_verts[:, 1] + face_2_verts[:, 2]) <= 1)
)
self.assertTrue(
torch.allclose(
face_2_normals,
torch.tensor([1, 0, 0], dtype=torch.float32).expand(
face_2_normals.size()
),
)
)
# Face 3: y = 0, x + z <= 1, normals = (0, -1, 0).
face_3_idxs = pyramid_verts[:, 1] == 0
face_3_verts, face_3_normals = (
pyramid_verts[face_3_idxs, :],
pyramid_normals[face_3_idxs, :],
)
self.assertTrue(
torch.all((face_3_verts[:, 0] + face_3_verts[:, 2]) <= 1)
)
self.assertTrue(
torch.allclose(
face_3_normals,
torch.tensor([0, -1, 0], dtype=torch.float32).expand(
face_3_normals.size()
),
)
)
# Face 4: x + y + z = 1, normals = (1, 1, 1)/sqrt(3).
face_4_idxs = pyramid_verts.gt(0).all(1)
face_4_verts, face_4_normals = (
pyramid_verts[face_4_idxs, :],
pyramid_normals[face_4_idxs, :],
)
self.assertTrue(
torch.allclose(
face_4_verts.sum(1), torch.ones(face_4_verts.size(0))
)
)
self.assertTrue(
torch.allclose(
face_4_normals,
(
torch.tensor([1, 1, 1], dtype=torch.float32)
/ torch.sqrt(torch.tensor(3, dtype=torch.float32))
).expand(face_4_normals.size()),
)
)
def test_mutinomial(self):
"""
Confirm that torch.multinomial does not sample elements which have
zero probability.
"""
freqs = torch.cuda.FloatTensor(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.03178183361887932,
0.027680952101945877,
0.033176131546497345,
0.046052902936935425,
0.07742464542388916,
0.11543981730937958,
0.14148041605949402,
0.15784293413162231,
0.13180233538150787,
0.08271478116512299,
0.049702685326337814,
0.027557924389839172,
0.018125897273421288,
0.011851548217236996,
0.010252203792333603,
0.007422595750540495,
0.005372154992073774,
0.0045109698548913,
0.0036087757907807827,
0.0035267581697553396,
0.0018864056328311563,
0.0024605290964245796,
0.0022964938543736935,
0.0018453967059031129,
0.0010662291897460818,
0.0009842115687206388,
0.00045109697384759784,
0.0007791675161570311,
0.00020504408166743815,
0.00020504408166743815,
0.00020504408166743815,
0.00012302644609007984,
0.0,
0.00012302644609007984,
4.100881778867915e-05,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
sample = []
for _ in range(1000):
torch.cuda.get_rng_state()
sample = torch.multinomial(freqs, 1000, True)
if freqs[sample].min() == 0:
sample_idx = (freqs[sample] == 0).nonzero()[0][0]
sampled = sample[sample_idx]
print(
"%s th element of last sample was %s, which has probability %s"
% (sample_idx, sampled, freqs[sampled])
)
return False
return True
def test_multinomial_weights(self):
"""
Confirm that torch.multinomial does not sample elements which have
zero probability using a real example of input from a training run.
"""
weights = torch.load(Path(__file__).resolve().parent / "weights.pt")
S = 4096
num_trials = 100
for _ in range(0, num_trials):
weights[weights < 0] = 0.0
samples = weights.multinomial(S, replacement=True)
sampled_weights = weights[samples]
assert sampled_weights.min() > 0
if sampled_weights.min() <= 0:
return False
return True
@staticmethod
def face_areas(verts, faces):
"""
Vectorized PyTorch implementation of triangle face area function.
"""
verts_faces = verts[faces]
v0x = verts_faces[:, 0::3, 0]
v0y = verts_faces[:, 0::3, 1]
v0z = verts_faces[:, 0::3, 2]
v1x = verts_faces[:, 1::3, 0]
v1y = verts_faces[:, 1::3, 1]
v1z = verts_faces[:, 1::3, 2]
v2x = verts_faces[:, 2::3, 0]
v2y = verts_faces[:, 2::3, 1]
v2z = verts_faces[:, 2::3, 2]
ax = v0x - v2x
ay = v0y - v2y
az = v0z - v2z
bx = v1x - v2x
by = v1y - v2y
bz = v1z - v2z
cx = ay * bz - az * by
cy = az * bx - ax * bz
cz = ax * by - ay * bx
# this gives the area of the parallelogram with sides a and b
area_sqr = cx * cx + cy * cy + cz * cz
# the area of the triangle is half
return torch.sqrt(area_sqr) / 2.0
def test_face_areas(self):
"""
Check the results from face_areas cuda and PyTorch implementions are
the same. Check that face_areas throws an error if cpu tensors are
given as input.
"""
meshes = self.init_meshes(10, 1000, 3000, device="cuda:0")
verts = meshes.verts_packed()
faces = meshes.faces_packed()
areas_torch = self.face_areas(verts, faces).squeeze()
areas_cuda, _ = _C.face_areas_normals(verts, faces)
self.assertTrue(torch.allclose(areas_torch, areas_cuda, atol=5e-8))
with self.assertRaises(Exception) as err:
_C.face_areas_normals(verts.cpu(), faces.cpu())
self.assertTrue("Not implemented on the CPU" in str(err.exception))
@staticmethod
def packed_to_padded_tensor(inputs, first_idxs, max_size):
"""
PyTorch implementation of cuda packed_to_padded_tensor function.
"""
num_meshes = first_idxs.size(0)
inputs_padded = torch.zeros((num_meshes, max_size))
for m in range(num_meshes):
s = first_idxs[m]
if m == num_meshes - 1:
f = inputs.size(0)
else:
f = first_idxs[m + 1]
inputs_padded[m, :f] = inputs[s:f]
return inputs_padded
def test_packed_to_padded_tensor(self):
"""
Check the results from packed_to_padded cuda and PyTorch implementions
are the same.
"""
meshes = self.init_meshes(1, 3, 5, device="cuda:0")
verts = meshes.verts_packed()
faces = meshes.faces_packed()
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_faces = meshes.num_faces_per_mesh().max().item()
areas, _ = _C.face_areas_normals(verts, faces)
areas_padded = _C.packed_to_padded_tensor(
areas, mesh_to_faces_packed_first_idx, max_faces
).cpu()
areas_padded_cpu = TestSamplePoints.packed_to_padded_tensor(
areas, mesh_to_faces_packed_first_idx, max_faces
)
self.assertTrue(torch.allclose(areas_padded, areas_padded_cpu))
with self.assertRaises(Exception) as err:
_C.packed_to_padded_tensor(
areas.cpu(), mesh_to_faces_packed_first_idx, max_faces
)
self.assertTrue("Not implemented on the CPU" in str(err.exception))
@staticmethod
def sample_points_with_init(
num_meshes: int,
num_verts: int,
num_faces: int,
num_samples: int,
device: str = "cpu",
):
device = torch.device(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3), dtype=torch.float32, device=device
)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
torch.cuda.synchronize()
def sample_points():
sample_points_from_meshes(
meshes, num_samples=num_samples, return_normals=True
)
torch.cuda.synchronize()
return sample_points
@staticmethod
def face_areas_with_init(
num_meshes: int, num_verts: int, num_faces: int, cuda: str = True
):
device = "cuda" if cuda else "cpu"
meshes = TestSamplePoints.init_meshes(
num_meshes, num_verts, num_faces, device
)
verts = meshes.verts_packed()
faces = meshes.faces_packed()
torch.cuda.synchronize()
def face_areas():
if cuda:
_C.face_areas_normals(verts, faces)
else:
TestSamplePoints.face_areas(verts, faces)
torch.cuda.synchronize()
return face_areas
@staticmethod
def packed_to_padded_with_init(
num_meshes: int, num_verts: int, num_faces: int, cuda: str = True
):
device = "cuda" if cuda else "cpu"
meshes = TestSamplePoints.init_meshes(
num_meshes, num_verts, num_faces, device
)
verts = meshes.verts_packed()
faces = meshes.faces_packed()
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_faces = meshes.num_faces_per_mesh().max().item()
if cuda:
areas, _ = _C.face_areas_normals(verts, faces)
else:
areas = TestSamplePoints.face_areas(verts, faces)
torch.cuda.synchronize()
def packed_to_padded():
if cuda:
_C.packed_to_padded_tensor(
areas, mesh_to_faces_packed_first_idx, max_faces
)
else:
TestSamplePoints.packed_to_padded_tensor(
areas, mesh_to_faces_packed_first_idx, max_faces
)
torch.cuda.synchronize()
return packed_to_padded
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
from pytorch3d.transforms.so3 import (
hat,
so3_exponential_map,
so3_log_map,
so3_relative_angle,
)
class TestSO3(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
np.random.seed(42)
@staticmethod
def init_log_rot(batch_size: int = 10):
"""
Initialize a list of `batch_size` 3-dimensional vectors representing
randomly generated logarithms of rotation matrices.
"""
device = torch.device("cuda:0")
log_rot = torch.randn(
(batch_size, 3), dtype=torch.float32, device=device
)
return log_rot
@staticmethod
def init_rot(batch_size: int = 10):
"""
Randomly generate a batch of `batch_size` 3x3 rotation matrices.
"""
device = torch.device("cuda:0")
# TODO(dnovotny): replace with random_rotation from random_rotation.py
rot = []
for _ in range(batch_size):
r = torch.qr(torch.randn((3, 3), device=device))[0]
f = torch.randint(2, (3,), device=device, dtype=torch.float32)
if f.sum() % 2 == 0:
f = 1 - f
rot.append(r * (2 * f - 1).float())
rot = torch.stack(rot)
return rot
def test_determinant(self):
"""
Tests whether the determinants of 3x3 rotation matrices produced
by `so3_exponential_map` are (almost) equal to 1.
"""
log_rot = TestSO3.init_log_rot(batch_size=30)
Rs = so3_exponential_map(log_rot)
for R in Rs:
det = np.linalg.det(R.cpu().numpy())
self.assertAlmostEqual(float(det), 1.0, 5)
def test_cross(self):
"""
For a pair of randomly generated 3-dimensional vectors `a` and `b`,
tests whether a matrix product of `hat(a)` and `b` equals the result
of a cross product between `a` and `b`.
"""
device = torch.device("cuda:0")
a, b = torch.randn((2, 100, 3), dtype=torch.float32, device=device)
hat_a = hat(a)
cross = torch.bmm(hat_a, b[:, :, None])[:, :, 0]
torch_cross = torch.cross(a, b, dim=1)
max_df = (cross - torch_cross).abs().max()
self.assertAlmostEqual(float(max_df), 0.0, 5)
def test_bad_so3_input_value_err(self):
"""
Tests whether `so3_exponential_map` and `so3_log_map` correctly return
a ValueError if called with an argument of incorrect shape or, in case
of `so3_exponential_map`, unexpected trace.
"""
device = torch.device("cuda:0")
log_rot = torch.randn(size=[5, 4], device=device)
with self.assertRaises(ValueError) as err:
so3_exponential_map(log_rot)
self.assertTrue(
"Input tensor shape has to be Nx3." in str(err.exception)
)
rot = torch.randn(size=[5, 3, 5], device=device)
with self.assertRaises(ValueError) as err:
so3_log_map(rot)
self.assertTrue(
"Input has to be a batch of 3x3 Tensors." in str(err.exception)
)
# trace of rot definitely bigger than 3 or smaller than -1
rot = torch.cat(
(
torch.rand(size=[5, 3, 3], device=device) + 4.0,
torch.rand(size=[5, 3, 3], device=device) - 3.0,
)
)
with self.assertRaises(ValueError) as err:
so3_log_map(rot)
self.assertTrue(
"A matrix has trace outside valid range [-1-eps,3+eps]."
in str(err.exception)
)
def test_so3_exp_singularity(self, batch_size: int = 100):
"""
Tests whether the `so3_exponential_map` is robust to the input vectors
the norms of which are close to the numerically unstable region
(vectors with low l2-norms).
"""
# generate random log-rotations with a tiny angle
log_rot = TestSO3.init_log_rot(batch_size=batch_size)
log_rot_small = log_rot * 1e-6
R = so3_exponential_map(log_rot_small)
# tests whether all outputs are finite
R_sum = float(R.sum())
self.assertEqual(R_sum, R_sum)
def test_so3_log_singularity(self, batch_size: int = 100):
"""
Tests whether the `so3_log_map` is robust to the input matrices
who's rotation angles are close to the numerically unstable region
(i.e. matrices with low rotation angles).
"""
# generate random rotations with a tiny angle
device = torch.device("cuda:0")
r = torch.eye(3, device=device)[None].repeat((batch_size, 1, 1))
r += torch.randn((batch_size, 3, 3), device=device) * 1e-3
r = torch.stack([torch.qr(r_)[0] for r_ in r])
# the log of the rotation matrix r
r_log = so3_log_map(r)
# tests whether all outputs are finite
r_sum = float(r_log.sum())
self.assertEqual(r_sum, r_sum)
def test_so3_log_to_exp_to_log(self, batch_size: int = 100):
"""
Check that `so3_log_map(so3_exponential_map(log_rot))==log_rot` for
a randomly generated batch of rotation matrix logarithms `log_rot`.
"""
log_rot = TestSO3.init_log_rot(batch_size=batch_size)
log_rot_ = so3_log_map(so3_exponential_map(log_rot))
max_df = (log_rot - log_rot_).abs().max()
self.assertAlmostEqual(float(max_df), 0.0, 4)
def test_so3_exp_to_log_to_exp(self, batch_size: int = 100):
"""
Check that `so3_exponential_map(so3_log_map(R))==R` for
a batch of randomly generated rotation matrices `R`.
"""
rot = TestSO3.init_rot(batch_size=batch_size)
rot_ = so3_exponential_map(so3_log_map(rot))
angles = so3_relative_angle(rot, rot_)
max_angle = angles.max()
# a lot of precision lost here :(
# TODO: fix this test??
self.assertTrue(np.allclose(float(max_angle), 0.0, atol=0.1))
def test_so3_cos_angle(self, batch_size: int = 100):
    """
    Check that `so3_relative_angle(R1, R2, cos_angle=False).cos()`
    matches `so3_relative_angle(R1, R2, cos_angle=True)` for
    batches of randomly generated rotation matrices `R1` and `R2`.
    """
    rot_a = TestSO3.init_rot(batch_size=batch_size)
    rot_b = TestSO3.init_rot(batch_size=batch_size)
    # cosine obtained by taking cos() of the returned angle ...
    cos_of_angle = so3_relative_angle(rot_a, rot_b, cos_angle=False).cos()
    # ... must equal the cosine returned directly
    cos_direct = so3_relative_angle(rot_a, rot_b, cos_angle=True)
    self.assertTrue(torch.allclose(cos_of_angle, cos_direct))
@staticmethod
def so3_expmap(batch_size: int = 10):
    """
    Return a benchmark closure that runs `so3_exponential_map` on a
    fixed batch of random log-rotations.
    """
    inputs = TestSO3.init_log_rot(batch_size=batch_size)
    torch.cuda.synchronize()

    def run():
        so3_exponential_map(inputs)
        torch.cuda.synchronize()

    return run
@staticmethod
def so3_logmap(batch_size: int = 10):
    """
    Return a benchmark closure that runs `so3_log_map` on a fixed batch
    of random rotation matrices.
    """
    # note: the inputs are rotation matrices (the original local was
    # misleadingly named `log_rot`); `so3_log_map` consumes rotations
    rotations = TestSO3.init_rot(batch_size=batch_size)
    torch.cuda.synchronize()

    def run():
        so3_log_map(rotations)
        torch.cuda.synchronize()

    return run
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.structures import utils as struct_utils
from common_testing import TestCaseMixin
class TestStructUtils(TestCaseMixin, unittest.TestCase):
    """Tests for the padded/packed list-conversion helpers in
    `pytorch3d.structures.utils`."""

    def test_list_to_padded(self):
        """`list_to_padded` pads a list of variably sized tensors into one
        batched tensor, and validates pad sizes and input dimensionality."""
        device = torch.device("cuda:0")
        N = 5  # number of tensors in the list
        K = 20  # exclusive upper bound for each random dimension
        ndim = 2
        x = []
        for _ in range(N):
            dims = torch.randint(K, size=(ndim,)).tolist()
            x.append(torch.rand(dims, device=device))
        pad_size = [K] * ndim
        x_padded = struct_utils.list_to_padded(
            x, pad_size=pad_size, pad_value=0.0, equisized=False
        )
        self.assertEqual(x_padded.shape[1], K)
        self.assertEqual(x_padded.shape[2], K)
        # each input must be recoverable from the top-left corner of its slice
        for i in range(N):
            self.assertClose(
                x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i]
            )
        # check for no pad size (defaults to max dimension)
        x_padded = struct_utils.list_to_padded(
            x, pad_value=0.0, equisized=False
        )
        max_size0 = max(y.shape[0] for y in x)
        max_size1 = max(y.shape[1] for y in x)
        self.assertEqual(x_padded.shape[1], max_size0)
        self.assertEqual(x_padded.shape[2], max_size1)
        for i in range(N):
            self.assertClose(
                x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i]
            )
        # check for equisized
        x = [torch.rand((K, 10), device=device) for _ in range(N)]
        x_padded = struct_utils.list_to_padded(x, equisized=True)
        self.assertClose(x_padded, torch.stack(x, 0))
        # catch ValueError for invalid dimensions
        with self.assertRaisesRegex(ValueError, "Pad size must"):
            pad_size = [K] * 4
            struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )
        # invalid input tensor dimensions (3-dim elements are rejected here)
        x = []
        ndim = 3
        for _ in range(N):
            dims = torch.randint(K, size=(ndim,)).tolist()
            x.append(torch.rand(dims, device=device))
        pad_size = [K] * 2
        with self.assertRaisesRegex(ValueError, "Supports only"):
            x_padded = struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

    def test_padded_to_list(self):
        """`padded_to_list` splits a padded batch tensor back into a list,
        optionally trimming each element by per-element split sizes."""
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2
        dims = [K] * ndim
        x = torch.rand([N] + dims, device=device)
        # no split size: each list element is the full per-batch slice
        x_list = struct_utils.padded_to_list(x)
        for i in range(N):
            self.assertClose(x_list[i], x[i])
        # 1D split sizes trim only the first dimension of each element
        split_size = torch.randint(1, K, size=(N,)).tolist()
        x_list = struct_utils.padded_to_list(x, split_size)
        for i in range(N):
            self.assertClose(x_list[i], x[i, : split_size[i]])
        # per-element (dim0, dim1) split sizes trim both dimensions
        split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0)
        x_list = struct_utils.padded_to_list(x, split_size)
        for i in range(N):
            self.assertClose(
                x_list[i], x[i, : split_size[i][0], : split_size[i][1]]
            )
        # a 5-dim input is rejected
        with self.assertRaisesRegex(ValueError, "Supports only"):
            x = torch.rand((N, K, K, K, K), device=device)
            split_size = torch.randint(1, K, size=(N,)).tolist()
            struct_utils.padded_to_list(x, split_size)

    def test_list_to_packed(self):
        """`list_to_packed` concatenates a list of (dim1_i, dim2) tensors
        and returns bookkeeping tensors describing the packing."""
        device = torch.device("cuda:0")
        N = 5
        K = 20
        x, x_dims = [], []
        dim2 = torch.randint(K, size=(1,)).item()  # shared second dim
        for _ in range(N):
            dim1 = torch.randint(K, size=(1,)).item()
            x_dims.append(dim1)
            x.append(torch.rand([dim1, dim2], device=device))
        out = struct_utils.list_to_packed(x)
        x_packed = out[0]  # concatenated tensor
        num_items = out[1]  # rows contributed by each list element
        item_packed_first_idx = out[2]  # first packed row of each element
        item_packed_to_list_idx = out[3]  # packed row -> list index
        cur = 0
        for i in range(N):
            self.assertTrue(num_items[i] == x_dims[i])
            self.assertTrue(item_packed_first_idx[i] == cur)
            self.assertTrue(
                item_packed_to_list_idx[cur : cur + x_dims[i]].eq(i).all()
            )
            self.assertClose(x_packed[cur : cur + x_dims[i]], x[i])
            cur += x_dims[i]
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.ops.subdivide_meshes import SubdivideMeshes
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere
class TestSubdivideMeshes(unittest.TestCase):
    """Tests for `SubdivideMeshes`, which splits each mesh face into four
    by inserting a new vertex at each edge midpoint."""

    def test_simple_subdivide(self):
        # Create a mesh with one face and check the subdivided mesh has
        # 4 faces with the correct vertex coordinates.
        device = torch.device("cuda:0")
        verts = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
        mesh = Meshes(verts=[verts], faces=[faces])
        subdivide = SubdivideMeshes()
        new_mesh = subdivide(mesh)
        # Subdivided face (v3, v4, v5 are the edge midpoints):
        #
        #           v0
        #           /\
        #          /  \
        #         / f0 \
        #     v4 /______\ v3
        #       /\      /\
        #      /  \ f3 /  \
        #     / f2 \  / f1 \
        #    /______\/______\
        #   v2      v5      v1
        #
        gt_subdivide_verts = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [0.5, 0.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivide_faces = torch.tensor(
            [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],
            dtype=torch.int64,
            device=device,
        )
        new_verts, new_faces = new_mesh.get_mesh_verts_faces(0)
        self.assertTrue(torch.allclose(new_verts, gt_subdivide_verts))
        self.assertTrue(torch.allclose(new_faces, gt_subdivide_faces))
        # autograd tracking must be preserved through subdivision
        self.assertTrue(new_verts.requires_grad == verts.requires_grad)

    def test_heterogeneous_meshes(self):
        """Subdivide a batch of three meshes with differing topologies and
        check each result against hand-computed ground truth."""
        device = torch.device("cuda:0")
        verts1 = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
        verts2 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [1.5, 1.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces2 = torch.tensor(
            [[0, 1, 2], [0, 3, 1]], dtype=torch.int64, device=device
        )
        faces3 = torch.tensor(
            [[0, 1, 2], [0, 2, 3]], dtype=torch.int64, device=device
        )
        mesh = Meshes(
            verts=[verts1, verts2, verts2], faces=[faces1, faces2, faces3]
        )
        subdivide = SubdivideMeshes()
        new_mesh = subdivide(mesh.clone())
        gt_subdivided_verts1 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [0.5, 0.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces1 = torch.tensor(
            [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],
            dtype=torch.int64,
            device=device,
        )
        # faces2:
        #
        #  v0 _______e2_______ v3
        #    /\              /
        #   /  \            /
        #  /    \          /
        # e1     \ e0     / e4
        #  \      \      /
        #   \      \    /
        #    \      \  /
        #     \______\/
        #    v2  e3   v1
        #
        # Subdivided faces2:
        #
        #  v0 _______v6_______ v3
        #    /\      /\      /
        #   /  \ f1 /  \ f3 /
        #  / f0 \  / f7 \  /
        # v5 ____v4______\/ v8
        #  \      /\      /
        #   \ f6 /  \ f5 /
        #    \  / f2 \  /
        #     \/______\/
        #    v2   v7   v1
        #
        gt_subdivided_verts2 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [1.5, 1.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [1.0, 1.0, 0.0],
                [0.5, 0.0, 0.0],
                [1.25, 0.5, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces2 = torch.tensor(
            [
                [0, 4, 5],
                [0, 6, 4],
                [1, 7, 4],
                [3, 8, 6],
                [2, 5, 7],
                [1, 4, 8],
                [7, 5, 4],
                [8, 4, 6],
            ],
            dtype=torch.int64,
            device=device,
        )
        # mesh 3 shares verts2 but a different face wiring: only the last
        # midpoint vertex differs from mesh 2's ground truth
        gt_subdivided_verts3 = gt_subdivided_verts2.clone()
        gt_subdivided_verts3[-1, :] = torch.tensor(
            [0.75, 0.5, 0], dtype=torch.float32, device=device
        )
        gt_subdivided_faces3 = torch.tensor(
            [
                [0, 4, 5],
                [0, 5, 6],
                [1, 7, 4],
                [2, 8, 5],
                [2, 5, 7],
                [3, 6, 8],
                [7, 5, 4],
                [8, 6, 5],
            ],
            dtype=torch.int64,
            device=device,
        )
        new_mesh_verts1, new_mesh_faces1 = new_mesh.get_mesh_verts_faces(0)
        new_mesh_verts2, new_mesh_faces2 = new_mesh.get_mesh_verts_faces(1)
        new_mesh_verts3, new_mesh_faces3 = new_mesh.get_mesh_verts_faces(2)
        self.assertTrue(torch.allclose(new_mesh_verts1, gt_subdivided_verts1))
        self.assertTrue(torch.allclose(new_mesh_faces1, gt_subdivided_faces1))
        self.assertTrue(torch.allclose(new_mesh_verts2, gt_subdivided_verts2))
        self.assertTrue(torch.allclose(new_mesh_faces2, gt_subdivided_faces2))
        self.assertTrue(torch.allclose(new_mesh_verts3, gt_subdivided_verts3))
        self.assertTrue(torch.allclose(new_mesh_faces3, gt_subdivided_faces3))
        self.assertTrue(new_mesh_verts1.requires_grad == verts1.requires_grad)
        self.assertTrue(new_mesh_verts2.requires_grad == verts2.requires_grad)
        self.assertTrue(new_mesh_verts3.requires_grad == verts2.requires_grad)

    def test_subdivide_features(self):
        """Per-vertex features passed alongside the mesh must be subdivided
        consistently: new vertices get the mean of their edge endpoints."""
        device = torch.device("cuda:0")
        mesh = ico_sphere(0, device)
        N = 10
        mesh = mesh.extend(N)
        edges = mesh.edges_packed()
        V = mesh.num_verts_per_mesh()[0]
        D = 256
        feats = torch.rand(
            (N * V, D), dtype=torch.float32, device=device, requires_grad=True
        )  # packed features
        # expected features for the newly created midpoint vertices
        app_feats = feats[edges].mean(1)
        subdivide = SubdivideMeshes()
        new_mesh, new_feats = subdivide(mesh, feats)
        # per mesh: original features first, then the appended edge features
        gt_feats = torch.cat(
            (feats.view(N, V, D), app_feats.view(N, -1, D)), dim=1
        ).view(-1, D)
        self.assertTrue(torch.allclose(new_feats, gt_feats))
        self.assertTrue(new_feats.requires_grad == gt_feats.requires_grad)

    @staticmethod
    def subdivide_meshes_with_init(
        num_meshes: int = 10, same_topo: bool = False
    ):
        """Return a benchmark closure that subdivides `num_meshes` ico-spheres,
        optionally precomputing topology via the `meshes` constructor arg."""
        device = torch.device("cuda:0")
        meshes = ico_sphere(0, device=device)
        if num_meshes > 1:
            meshes = meshes.extend(num_meshes)
        meshes_init = meshes.clone() if same_topo else None
        torch.cuda.synchronize()

        def subdivide_meshes():
            subdivide = SubdivideMeshes(meshes=meshes_init)
            subdivide(meshes=meshes.clone())
            torch.cuda.synchronize()

        return subdivide_meshes
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn.functional as F
from pytorch3d.renderer.mesh.rasterizer import Fragments
from pytorch3d.renderer.mesh.texturing import (
_clip_barycentric_coordinates,
interpolate_face_attributes,
interpolate_texture_map,
interpolate_vertex_colors,
)
from pytorch3d.structures import Meshes, Textures
from common_testing import TestCaseMixin
from test_meshes import TestMeshes
class TestTexturing(TestCaseMixin, unittest.TestCase):
    """Tests for mesh texturing: barycentric interpolation of vertex colors,
    UV texture-map sampling, and the `Textures` container itself."""

    def test_interpolate_attributes(self):
        """
        This tests both interpolate_vertex_colors as well as
        interpolate_face_attributes.
        """
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
        )
        tex = Textures(verts_rgb=vert_tex[None, :])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        # one pixel covered by two faces (K=2 faces per pixel)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        # expected texels: barycentric mix of the three face-vertex colors
        expected_vals = torch.tensor(
            [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        texels = interpolate_vertex_colors(fragments, mesh)
        self.assertTrue(torch.allclose(texels, expected_vals[None, :]))

    def test_interpolate_attributes_grad(self):
        """Gradients must flow through interpolation back to the vertex
        colors, weighted by the barycentric coordinates."""
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
            dtype=torch.float32,
            requires_grad=True,
        )
        tex = Textures(verts_rgb=vert_tex[None, :])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        # expected d(sum)/d(vert_tex): per-vertex sums of the barycentric
        # weights over the faces that reference each vertex
        grad_vert_tex = torch.tensor(
            [
                [0.3, 0.3, 0.3],
                [0.9, 0.9, 0.9],
                [0.5, 0.5, 0.5],
                [0.3, 0.3, 0.3],
            ],
            dtype=torch.float32,
        )
        texels = interpolate_vertex_colors(fragments, mesh)
        texels.sum().backward()
        self.assertTrue(hasattr(vert_tex, "grad"))
        self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))

    def test_interpolate_face_attributes_fail(self):
        """Invalid fragment/attribute shapes must raise ValueError."""
        # 1. A face can only have 3 verts
        #   i.e. face_attributes must have shape (F, 3, D)
        face_attributes = torch.ones(1, 4, 3)
        pix_to_face = torch.ones((1, 1, 1, 1))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=pix_to_face[..., None].expand(-1, -1, -1, -1, 3),
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        with self.assertRaises(ValueError):
            interpolate_face_attributes(fragments, face_attributes)
        # 2. pix_to_face must have shape (N, H, W, K)
        pix_to_face = torch.ones((1, 1, 1, 1, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=pix_to_face,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        with self.assertRaises(ValueError):
            interpolate_face_attributes(fragments, face_attributes)

    def test_interpolate_texture_map(self):
        """UV texture sampling must match a direct F.grid_sample of the
        flipped texture map at the interpolated UV coordinates."""
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        dummy_verts = torch.zeros(4, 3)
        vert_uvs = torch.tensor(
            [[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32
        )
        face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)
        # barycentric mix of the per-vertex UVs for each sampled face
        interpolated_uvs = torch.tensor(
            [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32
        )
        # Create a dummy texture map
        H = 2
        W = 2
        x = torch.linspace(0, 1, W).view(1, W).expand(H, W)
        y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)
        tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        tex = Textures(
            maps=tex_map,
            faces_uvs=face_uvs[None, ...],
            verts_uvs=vert_uvs[None, ...],
        )
        meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex)
        texels = interpolate_texture_map(fragments, meshes)
        # Expected output: rescale UVs from [0, 1] to grid_sample's [-1, 1]
        pixel_uvs = interpolated_uvs * 2.0 - 1.0
        pixel_uvs = pixel_uvs.view(2, 1, 1, 2)
        tex_map = torch.flip(tex_map, [1])
        tex_map = tex_map.permute(0, 3, 1, 2)
        tex_map = torch.cat([tex_map, tex_map], dim=0)
        expected_out = F.grid_sample(tex_map, pixel_uvs, align_corners=False)
        self.assertTrue(
            torch.allclose(texels.squeeze(), expected_out.squeeze())
        )

    def test_clone(self):
        """clone() must produce deep copies of the padded texture tensors."""
        V = 20
        tex = Textures(
            maps=torch.ones((5, 16, 16, 3)),
            faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
            verts_uvs=torch.ones((5, V, 2)),
        )
        tex_cloned = tex.clone()
        self.assertSeparate(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
        self.assertSeparate(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
        self.assertSeparate(tex._maps_padded, tex_cloned._maps_padded)

    def test_to(self):
        """to(device) must move all padded texture tensors."""
        V = 20
        tex = Textures(
            maps=torch.ones((5, 16, 16, 3)),
            faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
            verts_uvs=torch.ones((5, V, 2)),
        )
        device = torch.device("cuda:0")
        tex = tex.to(device)
        self.assertTrue(tex._faces_uvs_padded.device == device)
        self.assertTrue(tex._verts_uvs_padded.device == device)
        self.assertTrue(tex._maps_padded.device == device)

    def test_extend(self):
        """Meshes.extend(N) must replicate each mesh's texture N times,
        in separate (non-aliased) tensors; N must be positive."""
        B = 10
        mesh = TestMeshes.init_mesh(B, 30, 50)
        V = mesh._V
        F = mesh._F
        tex = Textures(
            maps=torch.randn((B, 16, 16, 3)),
            faces_uvs=torch.randint(size=(B, F, 3), low=0, high=V),
            verts_uvs=torch.randn((B, V, 2)),
        )
        tex_mesh = Meshes(
            verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex
        )
        N = 20
        new_mesh = tex_mesh.extend(N)
        self.assertEqual(len(tex_mesh) * N, len(new_mesh))
        tex_init = tex_mesh.textures
        new_tex = new_mesh.textures
        # mesh i's texture appears at positions i*N .. i*N + N - 1
        for i in range(len(tex_mesh)):
            for n in range(N):
                self.assertClose(
                    tex_init.faces_uvs_list()[i],
                    new_tex.faces_uvs_list()[i * N + n],
                )
                self.assertClose(
                    tex_init.verts_uvs_list()[i],
                    new_tex.verts_uvs_list()[i * N + n],
                )
        self.assertAllSeparate(
            [
                tex_init.faces_uvs_padded(),
                new_tex.faces_uvs_padded(),
                tex_init.verts_uvs_padded(),
                new_tex.verts_uvs_padded(),
                tex_init.maps_padded(),
                new_tex.maps_padded(),
            ]
        )
        with self.assertRaises(ValueError):
            tex_mesh.extend(N=-1)

    def test_clip_barycentric_coords(self):
        """Negative barycentric components are clamped to zero and the
        remainder renormalized to sum to one."""
        barycentric_coords = torch.tensor(
            [[1.5, -0.3, -0.2], [1.2, 0.3, -0.5]], dtype=torch.float32
        )
        expected_out = torch.tensor(
            [[1.0, 0.0, 0.0], [1.0 / 1.3, 0.3 / 1.3, 0.0]], dtype=torch.float32
        )
        clipped = _clip_barycentric_coordinates(barycentric_coords)
        self.assertTrue(torch.allclose(clipped, expected_out))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import math
import unittest
import torch
from pytorch3d.transforms.so3 import so3_exponential_map
from pytorch3d.transforms.transform3d import (
Rotate,
RotateAxisAngle,
Scale,
Transform3d,
Translate,
)
class TestTransform(unittest.TestCase):
    """Tests for `Transform3d` composition, cloning, application to points
    and normals, and inversion."""

    def test_to(self):
        """Moving a composed transform between devices must not raise."""
        tr = Translate(torch.FloatTensor([[1.0, 2.0, 3.0]]))
        R = torch.FloatTensor(
            [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]
        )
        R = Rotate(R)
        t = Transform3d().compose(R, tr)
        for _ in range(3):
            t.cpu()
            t.cuda()
            t.cuda()
            t.cpu()

    def test_clone(self):
        """
        Check that cloned transformations contain different _matrix objects.
        Also, the clone of a composed translation and rotation has to be
        the same as composition of clones of translation and rotation.
        """
        tr = Translate(torch.FloatTensor([[1.0, 2.0, 3.0]]))
        R = torch.FloatTensor(
            [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]
        )
        R = Rotate(R)
        # check that the _matrix property of clones of
        # both transforms are different
        for t in (R, tr):
            self.assertTrue(t._matrix is not t.clone()._matrix)
        # check that the _transforms lists of composition of R, tr contain
        # different objects
        t1 = Transform3d().compose(R, tr)
        # BUGFIX: the original iterated over the bare 2-tuple of lists, which
        # unpacked (R, tr) out of the *same* list and never compared any
        # transform with its clone; zip pairs each transform with its clone.
        for t, t_clone in zip(t1._transforms, t1.clone()._transforms):
            self.assertTrue(t is not t_clone)
            self.assertTrue(t._matrix is not t_clone._matrix)
        # check that all composed transforms are numerically equivalent
        t2 = Transform3d().compose(R.clone(), tr.clone())
        t3 = t1.clone()
        for t_pair in ((t1, t2), (t1, t3), (t2, t3)):
            matrix1 = t_pair[0].get_matrix()
            matrix2 = t_pair[1].get_matrix()
            self.assertTrue(torch.allclose(matrix1, matrix2))

    def test_translate(self):
        """Translation moves points but leaves normals unchanged."""
        t = Transform3d().translate(1, 2, 3)
        points = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]
        ).view(1, 3, 3)
        normals = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
        ).view(1, 3, 3)
        points_out = t.transform_points(points)
        normals_out = t.transform_normals(normals)
        points_out_expected = torch.tensor(
            [[2.0, 2.0, 3.0], [1.0, 3.0, 3.0], [1.5, 2.5, 3.0]]
        ).view(1, 3, 3)
        normals_out_expected = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
        ).view(1, 3, 3)
        self.assertTrue(torch.allclose(points_out, points_out_expected))
        self.assertTrue(torch.allclose(normals_out, normals_out_expected))

    def test_scale(self):
        """Chained scales multiply for points; normals are transformed by
        the inverse-transpose, hence divided by the scale."""
        t = Transform3d().scale(2.0).scale(0.5, 0.25, 1.0)
        points = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]
        ).view(1, 3, 3)
        normals = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
        ).view(1, 3, 3)
        points_out = t.transform_points(points)
        normals_out = t.transform_normals(normals)
        points_out_expected = torch.tensor(
            [[1.00, 0.00, 0.00], [0.00, 0.50, 0.00], [0.50, 0.25, 0.00]]
        ).view(1, 3, 3)
        normals_out_expected = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [1.0, 2.0, 0.0]]
        ).view(1, 3, 3)
        self.assertTrue(torch.allclose(points_out, points_out_expected))
        self.assertTrue(torch.allclose(normals_out, normals_out_expected))

    def test_scale_translate(self):
        """Scale followed by translate; normals only see the scale part."""
        t = Transform3d().scale(2, 1, 3).translate(1, 2, 3)
        points = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]
        ).view(1, 3, 3)
        normals = torch.tensor(
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
        ).view(1, 3, 3)
        points_out = t.transform_points(points)
        normals_out = t.transform_normals(normals)
        points_out_expected = torch.tensor(
            [[3.0, 2.0, 3.0], [1.0, 3.0, 3.0], [2.0, 2.5, 3.0]]
        ).view(1, 3, 3)
        normals_out_expected = torch.tensor(
            [[0.5, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 1.0, 0.0]]
        ).view(1, 3, 3)
        self.assertTrue(torch.allclose(points_out, points_out_expected))
        self.assertTrue(torch.allclose(normals_out, normals_out_expected))

    def test_rotate_axis_angle(self):
        """-90 degree rotation about Z maps +y to -x (points and normals)."""
        t = Transform3d().rotate_axis_angle(-90.0, axis="Z")
        points = torch.tensor(
            [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]]
        ).view(1, 3, 3)
        normals = torch.tensor(
            [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
        ).view(1, 3, 3)
        points_out = t.transform_points(points)
        normals_out = t.transform_normals(normals)
        points_out_expected = torch.tensor(
            [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [-1.0, 0.0, 1.0]]
        ).view(1, 3, 3)
        normals_out_expected = torch.tensor(
            [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
        ).view(1, 3, 3)
        self.assertTrue(torch.allclose(points_out, points_out_expected))
        self.assertTrue(torch.allclose(normals_out, normals_out_expected))

    def test_transform_points_fail(self):
        """transform_points rejects inputs that are not (..., P, 3)."""
        t1 = Scale(0.1, 0.1, 0.1)
        P = 7
        with self.assertRaises(ValueError):
            t1.transform_points(torch.randn(P))

    def test_compose_fail(self):
        # Only composing Transform3d objects is possible
        t1 = Scale(0.1, 0.1, 0.1)
        with self.assertRaises(ValueError):
            t1.compose(torch.randn(100))

    def test_transform_points_eps(self):
        """The eps argument must clamp near-zero homogeneous w-coordinates
        so that perspective division stays finite."""
        t1 = Transform3d()
        persp_proj = [
            [
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0],
                [0.0, 0.0, 1.0, 0.0],
            ]
        ]
        t1._matrix = torch.FloatTensor(persp_proj)
        points = torch.tensor(
            [[0.0, 1.0, 0.0], [0.0, 0.0, 1e-5], [-1.0, 0.0, 1e-5]]
        ).view(
            1, 3, 3
        )  # a set of points with z-coord very close to 0
        proj = t1.transform_points(points)
        proj_eps = t1.transform_points(points, eps=1e-4)
        # without eps the division by ~0 produces non-finite values
        self.assertTrue(not bool(torch.isfinite(proj.sum())))
        self.assertTrue(bool(torch.isfinite(proj_eps.sum())))

    def test_inverse(self, batch_size=5):
        """inverse() must agree with the explicitly computed product of the
        inverted component matrices, for random chains of transforms."""
        device = torch.device("cuda:0")
        # generate a random chain of transforms
        for _ in range(10):  # 10 different tries
            # list of transform matrices
            ts = []
            for i in range(10):
                # randomly pick a Translate, Rotate or Scale component
                choice = float(torch.rand(1))
                if choice <= 1.0 / 3.0:
                    t_ = Translate(
                        torch.randn(
                            (batch_size, 3), dtype=torch.float32, device=device
                        ),
                        device=device,
                    )
                elif choice <= 2.0 / 3.0:
                    t_ = Rotate(
                        so3_exponential_map(
                            torch.randn(
                                (batch_size, 3),
                                dtype=torch.float32,
                                device=device,
                            )
                        ),
                        device=device,
                    )
                else:
                    rand_t = torch.randn(
                        (batch_size, 3), dtype=torch.float32, device=device
                    )
                    # keep scale factors away from 0 for invertibility
                    rand_t = rand_t.sign() * torch.clamp(rand_t.abs(), 0.2)
                    t_ = Scale(rand_t, device=device)
                ts.append(t_._matrix.clone())
                if i == 0:
                    t = t_
                else:
                    t = t.compose(t_)
            # generate the inverse transformation in several possible ways
            m1 = t.inverse(invert_composed=True).get_matrix()
            m2 = t.inverse(invert_composed=True)._matrix
            m3 = t.inverse(invert_composed=False).get_matrix()
            m4 = t.get_matrix().inverse()
            # compute the inverse explicitly ...
            m5 = torch.eye(4, dtype=torch.float32, device=device)
            m5 = m5[None].repeat(batch_size, 1, 1)
            for t_ in ts:
                m5 = torch.bmm(torch.inverse(t_), m5)
            # assert all same
            for m in (m1, m2, m3, m4):
                self.assertTrue(torch.allclose(m, m5, atol=1e-3))
class TestTranslate(unittest.TestCase):
    """Tests for the `Translate` transform: scalar/tensor inputs,
    broadcasting, gradients, and inversion. `_matrix` uses row-vector
    convention, so the translation appears in the last row."""

    def test_python_scalar(self):
        """Three Python floats produce a single 4x4 translation matrix."""
        t = Translate(0.2, 0.3, 0.4)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_torch_scalar(self):
        """Three 0-dim torch tensors behave like Python scalars."""
        x = torch.tensor(0.2)
        y = torch.tensor(0.3)
        z = torch.tensor(0.4)
        t = Translate(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_mixed_scalars(self):
        """Python floats and torch scalars can be mixed freely."""
        x = 0.2
        y = torch.tensor(0.3)
        z = 0.4
        t = Translate(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_torch_scalar_grads(self):
        # Make sure backprop works if we give torch scalars
        x = torch.tensor(0.2, requires_grad=True)
        y = torch.tensor(0.3, requires_grad=True)
        z = torch.tensor(0.4)
        t = Translate(x, y, z)
        t._matrix.sum().backward()
        self.assertTrue(hasattr(x, "grad"))
        self.assertTrue(hasattr(y, "grad"))
        self.assertTrue(torch.allclose(x.grad, x.new_ones(x.shape)))
        self.assertTrue(torch.allclose(y.grad, y.new_ones(y.shape)))

    def test_torch_vectors(self):
        """Length-2 vectors produce a batch of two translation matrices."""
        x = torch.tensor([0.2, 2.0])
        y = torch.tensor([0.3, 3.0])
        z = torch.tensor([0.4, 4.0])
        t = Translate(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ],
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [2.0, 3.0, 4.0, 1],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_vector_broadcast(self):
        """A length-1 component broadcasts against length-2 components."""
        x = torch.tensor([0.2, 2.0])
        y = torch.tensor([0.3, 3.0])
        z = torch.tensor([0.4])
        t = Translate(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ],
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [2.0, 3.0, 0.4, 1],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_bad_broadcast(self):
        """Incompatible component lengths (3 vs 2) must raise ValueError."""
        x = torch.tensor([0.2, 2.0, 20.0])
        y = torch.tensor([0.3, 3.0])
        z = torch.tensor([0.4])
        with self.assertRaises(ValueError):
            Translate(x, y, z)

    def test_mixed_broadcast(self):
        """Scalars broadcast against a length-2 vector component."""
        x = 0.2
        y = torch.tensor(0.3)
        z = torch.tensor([0.4, 4.0])
        t = Translate(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ],
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 4.0, 1],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_mixed_broadcast_grad(self):
        """Gradients must flow through broadcast scalar/vector inputs;
        a broadcast scalar accumulates grad from both batch elements."""
        x = 0.2
        y = torch.tensor(0.3, requires_grad=True)
        z = torch.tensor([0.4, 4.0], requires_grad=True)
        t = Translate(x, y, z)
        t._matrix.sum().backward()
        self.assertTrue(hasattr(y, "grad"))
        self.assertTrue(hasattr(z, "grad"))
        y_grad = torch.tensor(2.0)  # appears once in each of the 2 matrices
        z_grad = torch.tensor([1.0, 1.0])
        self.assertEqual(y.grad.shape, y_grad.shape)
        self.assertEqual(z.grad.shape, z_grad.shape)
        self.assertTrue(torch.allclose(y.grad, y_grad))
        self.assertTrue(torch.allclose(z.grad, z_grad))

    def test_matrix(self):
        """A single (N, 3) tensor supplies all three components per row."""
        xyz = torch.tensor([[0.2, 0.3, 0.4], [2.0, 3.0, 4.0]])
        t = Translate(xyz)
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [0.2, 0.3, 0.4, 1],
                ],
                [
                    [1.0, 0.0, 0.0, 0],
                    [0.0, 1.0, 0.0, 0],
                    [0.0, 0.0, 1.0, 0],
                    [2.0, 3.0, 4.0, 1],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_matrix_extra_args(self):
        """Passing y/z alongside an (N, 3) matrix must raise ValueError."""
        xyz = torch.tensor([[0.2, 0.3, 0.4], [2.0, 3.0, 4.0]])
        with self.assertRaises(ValueError):
            Translate(xyz, xyz[:, 1], xyz[:, 2])

    def test_inverse(self):
        """inverse()._matrix must equal the matrix inverse."""
        xyz = torch.tensor([[0.2, 0.3, 0.4], [2.0, 3.0, 4.0]])
        t = Translate(xyz)
        im = t.inverse()._matrix
        im_2 = t._matrix.inverse()
        im_comp = t.get_matrix().inverse()
        self.assertTrue(torch.allclose(im, im_comp))
        self.assertTrue(torch.allclose(im, im_2))
class TestScale(unittest.TestCase):
    """Tests for the `Scale` transform: isotropic/anisotropic inputs,
    broadcasting, gradients, and inversion."""

    def test_single_python_scalar(self):
        """A single float produces an isotropic scale matrix."""
        t = Scale(0.1)
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.1, 0.0, 0.0],
                    [0.0, 0.0, 0.1, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_single_torch_scalar(self):
        """A 0-dim torch tensor behaves like a Python float."""
        t = Scale(torch.tensor(0.1))
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.1, 0.0, 0.0],
                    [0.0, 0.0, 0.1, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_single_vector(self):
        """A length-N vector produces N isotropic scale matrices."""
        t = Scale(torch.tensor([0.1, 0.2]))
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.1, 0.0, 0.0],
                    [0.0, 0.0, 0.1, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [0.2, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.2, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_single_matrix(self):
        """An (N, 3) tensor gives per-axis scales for each batch element."""
        xyz = torch.tensor([[0.1, 0.2, 0.3], [1.0, 2.0, 3.0]])
        t = Scale(xyz)
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 2.0, 0.0, 0.0],
                    [0.0, 0.0, 3.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_three_python_scalar(self):
        """Three floats give an anisotropic scale matrix."""
        t = Scale(0.1, 0.2, 0.3)
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_three_torch_scalar(self):
        """Three 0-dim torch tensors give an anisotropic scale matrix."""
        t = Scale(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_three_mixed_scalar(self):
        """Python floats and torch scalars can be mixed."""
        t = Scale(torch.tensor(0.1), 0.2, torch.tensor(0.3))
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_three_vector_broadcast(self):
        """A length-1 component broadcasts against length-2 components."""
        x = torch.tensor([0.1])
        y = torch.tensor([0.2, 2.0])
        z = torch.tensor([0.3, 3.0])
        t = Scale(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 2.0, 0.0, 0.0],
                    [0.0, 0.0, 3.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_three_mixed_broadcast_grad(self):
        """Gradients flow through broadcast inputs; a broadcast scalar
        accumulates gradient from both batch elements."""
        x = 0.1
        y = torch.tensor(0.2, requires_grad=True)
        z = torch.tensor([0.3, 3.0], requires_grad=True)
        t = Scale(x, y, z)
        matrix = torch.tensor(
            [
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 0.3, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [0.1, 0.0, 0.0, 0.0],
                    [0.0, 0.2, 0.0, 0.0],
                    [0.0, 0.0, 3.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
            ],
            dtype=torch.float32,
        )
        self.assertTrue(torch.allclose(t._matrix, matrix))
        t._matrix.sum().backward()
        self.assertTrue(hasattr(y, "grad"))
        self.assertTrue(hasattr(z, "grad"))
        y_grad = torch.tensor(2.0)  # y appears in both batch matrices
        z_grad = torch.tensor([1.0, 1.0])
        self.assertTrue(torch.allclose(y.grad, y_grad))
        self.assertTrue(torch.allclose(z.grad, z_grad))

    def test_inverse(self):
        """inverse()._matrix must equal the matrix inverse."""
        x = torch.tensor([0.1])
        y = torch.tensor([0.2, 2.0])
        z = torch.tensor([0.3, 3.0])
        t = Scale(x, y, z)
        im = t.inverse()._matrix
        im_2 = t._matrix.inverse()
        im_comp = t.get_matrix().inverse()
        self.assertTrue(torch.allclose(im, im_comp))
        self.assertTrue(torch.allclose(im, im_2))
class TestTransformBroadcast(unittest.TestCase):
    """Tests for broadcasting between batched transforms and batched
    point/normal tensors, and between composed transforms."""

    def test_broadcast_transform_points(self):
        """A 1-batch transform broadcasts over any point batch; an N-batch
        transform broadcasts over 1-batch or unbatched points."""
        t1 = Scale(0.1, 0.1, 0.1)
        N = 10
        P = 7
        M = 20
        x = torch.tensor([0.2] * N)
        y = torch.tensor([0.3] * N)
        z = torch.tensor([0.4] * N)
        tN = Translate(x, y, z)
        p1 = t1.transform_points(torch.randn(P, 3))
        self.assertTrue(p1.shape == (P, 3))
        p2 = t1.transform_points(torch.randn(1, P, 3))
        self.assertTrue(p2.shape == (1, P, 3))
        p3 = t1.transform_points(torch.randn(M, P, 3))
        self.assertTrue(p3.shape == (M, P, 3))
        p4 = tN.transform_points(torch.randn(P, 3))
        self.assertTrue(p4.shape == (N, P, 3))
        p5 = tN.transform_points(torch.randn(1, P, 3))
        self.assertTrue(p5.shape == (N, P, 3))

    def test_broadcast_transform_normals(self):
        """Same broadcasting rules as transform_points, for normals."""
        t1 = Scale(0.1, 0.1, 0.1)
        N = 10
        P = 7
        M = 20
        x = torch.tensor([0.2] * N)
        y = torch.tensor([0.3] * N)
        z = torch.tensor([0.4] * N)
        tN = Translate(x, y, z)
        p1 = t1.transform_normals(torch.randn(P, 3))
        self.assertTrue(p1.shape == (P, 3))
        p2 = t1.transform_normals(torch.randn(1, P, 3))
        self.assertTrue(p2.shape == (1, P, 3))
        p3 = t1.transform_normals(torch.randn(M, P, 3))
        self.assertTrue(p3.shape == (M, P, 3))
        p4 = tN.transform_normals(torch.randn(P, 3))
        self.assertTrue(p4.shape == (N, P, 3))
        p5 = tN.transform_normals(torch.randn(1, P, 3))
        self.assertTrue(p5.shape == (N, P, 3))

    def test_broadcast_compose(self):
        """Composing 1-batch with N-batch yields an N-batch matrix."""
        t1 = Scale(0.1, 0.1, 0.1)
        N = 10
        scale_n = torch.tensor([0.3] * N)
        tN = Scale(scale_n)
        t1N = t1.compose(tN)
        self.assertTrue(t1._matrix.shape == (1, 4, 4))
        self.assertTrue(tN._matrix.shape == (N, 4, 4))
        self.assertTrue(t1N.get_matrix().shape == (N, 4, 4))
        t11 = t1.compose(t1)
        self.assertTrue(t11.get_matrix().shape == (1, 4, 4))

    def test_broadcast_compose_fail(self):
        # Cannot compose two transforms which have batch dimensions N and M
        # other than the case where either N or M is 1
        N = 10
        M = 20
        scale_n = torch.tensor([0.3] * N)
        tN = Scale(scale_n)
        x = torch.tensor([0.2] * M)
        y = torch.tensor([0.3] * M)
        z = torch.tensor([0.4] * M)
        tM = Translate(x, y, z)
        with self.assertRaises(ValueError):
            t = tN.compose(tM)
            t.get_matrix()

    def test_multiple_broadcast_compose(self):
        """A chain of 1-batch and N-batch scales composes into an N-batch
        matrix whose rotation block is the product of all scale factors."""
        t1 = Scale(0.1, 0.1, 0.1)
        t2 = Scale(0.2, 0.2, 0.2)
        N = 10
        scale_n = torch.tensor([0.3] * N)
        tN = Scale(scale_n)
        t1N2 = t1.compose(tN.compose(t2))
        composed_mat = t1N2.get_matrix()
        self.assertTrue(composed_mat.shape == (N, 4, 4))
        expected_mat = torch.eye(3, dtype=torch.float32) * 0.3 * 0.2 * 0.1
        self.assertTrue(torch.allclose(composed_mat[0, :3, :3], expected_mat))
class TestRotate(unittest.TestCase):
    def test_single_matrix(self):
        """An identity 3x3 rotation yields the 4x4 identity transform."""
        transform = Rotate(torch.eye(3))
        expected = torch.eye(4, dtype=torch.float32).unsqueeze(0)
        self.assertTrue(torch.allclose(transform._matrix, expected))

    def test_invalid_dimensions(self):
        """A 4x4 input is not a valid 3x3 rotation matrix and must raise."""
        with self.assertRaises(ValueError):
            Rotate(torch.eye(4))

    def test_inverse(self, batch_size=5):
        """Rotate.inverse() agrees with matrix inversion for a batch of
        random rotations built via the SO(3) exponential map."""
        device = torch.device("cuda:0")
        log_rot = torch.randn(
            (batch_size, 3), dtype=torch.float32, device=device
        )
        transform = Rotate(so3_exponential_map(log_rot))
        inv_via_transform = transform.inverse()._matrix
        inv_of_internal = transform._matrix.inverse()
        inv_of_composed = transform.get_matrix().inverse()
        self.assertTrue(
            torch.allclose(inv_via_transform, inv_of_composed, atol=1e-4)
        )
        self.assertTrue(
            torch.allclose(inv_via_transform, inv_of_internal, atol=1e-4)
        )
class TestRotateAxisAngle(unittest.TestCase):
    """Check RotateAxisAngle against hand-written 4x4 homogeneous rotation
    matrices for the X, Y and Z axes, with the angle supplied as a python
    scalar, a torch scalar, and a batched tensor, in degrees and radians.
    """

    def test_rotate_x_python_scalar(self):
        # Angle given as a plain python number; degrees are the default.
        t = RotateAxisAngle(angle=90, axis="X")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0, -1.0, 0.0],  # noqa: E241, E201
                    [0.0, 1.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0,  0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix))

    def test_rotate_x_torch_scalar(self):
        # Same rotation, but the angle is a 0-dim torch tensor.
        angle = torch.tensor(90.0)
        t = RotateAxisAngle(angle=angle, axis="X")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0, -1.0, 0.0],  # noqa: E241, E201
                    [0.0, 1.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0,  0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_x_torch_tensor(self):
        # Batched angles of 0, 45 and 90 degrees -> batch of 3 matrices.
        angle = torch.tensor([0, 45.0, 90.0])  # (N)
        t = RotateAxisAngle(angle=angle, axis="X")
        # Both names evaluate to 1/sqrt(2) == sqrt(2)/2.
        r2_i = 1 / math.sqrt(2)
        r2_2 = math.sqrt(2) / 2
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [1.0,  0.0,   0.0, 0.0],  # noqa: E241, E201
                    [0.0, r2_2, -r2_i, 0.0],  # noqa: E241, E201
                    [0.0, r2_i,  r2_2, 0.0],  # noqa: E241, E201
                    [0.0,  0.0,   0.0, 1.0],  # noqa: E241, E201
                ],
                [
                    [1.0, 0.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0, -1.0, 0.0],  # noqa: E241, E201
                    [0.0, 1.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0,  0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))
        # A trailing singleton dimension on the angle is also accepted.
        angle = angle[..., None]  # (N, 1)
        t = RotateAxisAngle(angle=angle, axis="X")
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_y_python_scalar(self):
        t = RotateAxisAngle(angle=90, axis="Y")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [ 0.0, 0.0, 1.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 1.0, 0.0, 0.0],  # noqa: E241, E201
                    [-1.0, 0.0, 0.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_y_torch_scalar(self):
        angle = torch.tensor(90.0)
        t = RotateAxisAngle(angle=angle, axis="Y")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [ 0.0, 0.0, 1.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 1.0, 0.0, 0.0],  # noqa: E241, E201
                    [-1.0, 0.0, 0.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_y_torch_tensor(self):
        angle = torch.tensor([0, 45.0, 90.0])
        t = RotateAxisAngle(angle=angle, axis="Y")
        # Both names evaluate to 1/sqrt(2) == sqrt(2)/2.
        r2_i = 1 / math.sqrt(2)
        r2_2 = math.sqrt(2) / 2
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [ r2_2, 0.0, r2_i, 0.0],  # noqa: E241, E201
                    [  0.0, 1.0,  0.0, 0.0],  # noqa: E241, E201
                    [-r2_i, 0.0, r2_2, 0.0],  # noqa: E241, E201
                    [  0.0, 0.0,  0.0, 1.0],  # noqa: E241, E201
                ],
                [
                    [ 0.0, 0.0, 1.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 1.0, 0.0, 0.0],  # noqa: E241, E201
                    [-1.0, 0.0, 0.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_z_python_scalar(self):
        t = RotateAxisAngle(angle=90, axis="Z")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_z_torch_scalar(self):
        angle = torch.tensor(90.0)
        t = RotateAxisAngle(angle=angle, axis="Z")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_z_torch_tensor(self):
        angle = torch.tensor([0, 45.0, 90.0])
        t = RotateAxisAngle(angle=angle, axis="Z")
        # Both names evaluate to 1/sqrt(2) == sqrt(2)/2.
        r2_i = 1 / math.sqrt(2)
        r2_2 = math.sqrt(2) / 2
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ],
                [
                    [r2_2, -r2_i, 0.0, 0.0],  # noqa: E241, E201
                    [r2_i,  r2_2, 0.0, 0.0],  # noqa: E241, E201
                    [ 0.0,   0.0, 1.0, 0.0],  # noqa: E241, E201
                    [ 0.0,   0.0, 0.0, 1.0],  # noqa: E241, E201
                ],
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_rotate_compose_x_y_z(self):
        # Composing 90-degree rotations about X, Y and Z must match the
        # product of the individual matrices.
        angle = torch.tensor(90.0)
        t1 = RotateAxisAngle(angle=angle, axis="X")
        t2 = RotateAxisAngle(angle=angle, axis="Y")
        t3 = RotateAxisAngle(angle=angle, axis="Z")
        t = t1.compose(t2, t3)
        # fmt: off
        matrix1 = torch.tensor(
            [
                [
                    [1.0, 0.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0, -1.0, 0.0],  # noqa: E241, E201
                    [0.0, 1.0,  0.0, 0.0],  # noqa: E241, E201
                    [0.0, 0.0,  0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        matrix2 = torch.tensor(
            [
                [
                    [ 0.0, 0.0, 1.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 1.0, 0.0, 0.0],  # noqa: E241, E201
                    [-1.0, 0.0, 0.0, 0.0],  # noqa: E241, E201
                    [ 0.0, 0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        matrix3 = torch.tensor(
            [
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        # expected matrix for applying t1, then t2, then t3
        matrix = torch.matmul(matrix1, torch.matmul(matrix2, matrix3))
        composed_matrix = t.get_matrix()
        self.assertTrue(torch.allclose(composed_matrix, matrix, atol=1e-7))

    def test_rotate_angle_radians(self):
        # degrees=False: pi/2 radians must equal the 90-degree Z rotation.
        t = RotateAxisAngle(angle=math.pi / 2, degrees=False, axis="Z")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_lower_case_axis(self):
        # Axis names are case-insensitive: "z" behaves like "Z".
        t = RotateAxisAngle(angle=90.0, axis="z")
        # fmt: off
        matrix = torch.tensor(
            [
                [
                    [0.0, -1.0, 0.0, 0.0],  # noqa: E241, E201
                    [1.0,  0.0, 0.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 1.0, 0.0],  # noqa: E241, E201
                    [0.0,  0.0, 0.0, 1.0],  # noqa: E241, E201
                ]
            ],
            dtype=torch.float32,
        )
        # fmt: on
        self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7))

    def test_axis_fail(self):
        # An unknown axis name must raise.
        with self.assertRaises(ValueError):
            RotateAxisAngle(angle=90.0, axis="P")

    def test_rotate_angle_fail(self):
        # A 2-dim angle tensor is not a valid batch of angles.
        angle = torch.tensor([[0, 45.0, 90.0], [0, 45.0, 90.0]])
        with self.assertRaises(ValueError):
            RotateAxisAngle(angle=angle, axis="X")
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.renderer.utils import TensorProperties
from common_testing import TestCaseMixin
# Example class for testing
class TensorPropertiesTestClass(TensorProperties):
    """Minimal concrete TensorProperties subclass used as a test fixture."""

    def __init__(self, x=None, y=None, device="cpu"):
        super().__init__(device=device, x=x, y=y)

    def clone(self):
        """Return a copy built through the base-class clone helper."""
        duplicate = TensorPropertiesTestClass()
        return super().clone(duplicate)
class TestTensorProperties(TestCaseMixin, unittest.TestCase):
    """Exercises init, device transfer, cloning and indexed access of the
    TensorProperties mixin via a minimal concrete subclass."""

    def test_init(self):
        obj = TensorPropertiesTestClass(x=10.0, y=(100.0, 200.0))
        # Constructor kwargs become tensor attributes.
        self.assertTrue(torch.is_tensor(obj.x))
        self.assertTrue(torch.is_tensor(obj.y))
        # The scalar x is broadcast against the length-2 y.
        self.assertTrue(obj.x.shape == (2,))
        self.assertTrue(obj.y.shape == (2,))
        self.assertTrue(len(obj) == 2)

    def test_to(self):
        # Moving to a device is reflected in the returned object.
        obj = TensorPropertiesTestClass(x=10.0, y=(100.0, 200.0))
        target = torch.device("cuda:0")
        moved = obj.to(device=target)
        self.assertTrue(moved.device == target)

    def test_clone(self):
        obj = TensorPropertiesTestClass(x=10.0, y=(100.0, 200.0))
        copy = obj.clone()
        # Cloned tensors must not share storage with the originals.
        self.assertSeparate(obj.x, copy.x)
        self.assertSeparate(obj.y, copy.y)

    def test_get_set(self):
        # Indexing yields an accessor which can be used to modify
        # attributes at a particular index.
        obj = TensorPropertiesTestClass(x=10.0, y=(100.0, 200.0, 300.0))
        # Writes through the accessor update element 1 of y.
        obj[1].y = 5.0
        self.assertTrue(obj.y[1] == 5.0)
        # Reads through the accessor see the element at index 0.
        first = obj[0]
        self.assertTrue(first.y == 100.0)

    def test_empty_input(self):
        obj = TensorPropertiesTestClass(x=(), y=())
        self.assertTrue(len(obj) == 0)
        self.assertTrue(obj.isempty())
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn.functional as F
from pytorch3d.ops.vert_align import vert_align
from pytorch3d.structures.meshes import Meshes
class TestVertAlign(unittest.TestCase):
    """Compare pytorch3d's vert_align against a per-example naive
    implementation, for both Meshes inputs and raw vertex tensors."""

    @staticmethod
    def vert_align_naive(
        feats,
        verts_or_meshes,
        return_packed: bool = False,
        align_corners: bool = True,
    ):
        """
        Naive implementation of vert_align.

        Bilinearly samples each feature map at the (x, y) coordinates of
        every vertex, one batch element at a time via F.grid_sample.

        Args:
            feats: (N, C, H, W) tensor or a list of such tensors.
            verts_or_meshes: (N, V, 3) verts tensor or a Meshes object;
                only the first two coordinates of each vert are used as
                the sampling grid (expected to lie in [-1, 1]).
            return_packed: if True, concatenate results over the batch
                dimension to (sum(V), sum(C)); otherwise stack to
                (N, V, sum(C)).
            align_corners: forwarded to F.grid_sample.

        Returns:
            Sampled per-vertex features, packed or stacked as above.

        Raises:
            ValueError: if verts_or_meshes is neither a tensor nor an
                object exposing verts_list().
        """
        if torch.is_tensor(feats):
            feats = [feats]
        N = feats[0].shape[0]
        out_feats = []
        # sample every example in the batch separately
        for i in range(N):
            out_i_feats = []
            for feat in feats:
                feats_i = feat[i][None, :, :, :]  # (1, C, H, W)
                if torch.is_tensor(verts_or_meshes):
                    # Use only (x, y) of each vert as the sampling grid.
                    grid = verts_or_meshes[i][None, None, :, :2]  # (1, 1, V, 2)
                elif hasattr(verts_or_meshes, "verts_list"):
                    grid = verts_or_meshes.verts_list()[i][
                        None, None, :, :2
                    ]  # (1, 1, V, 2)
                else:
                    raise ValueError("verts_or_meshes is invalid")
                feat_sampled_i = F.grid_sample(
                    feats_i,
                    grid,
                    mode="bilinear",
                    padding_mode="zeros",
                    align_corners=align_corners,
                )  # (1, C, 1, V)
                feat_sampled_i = feat_sampled_i.squeeze(2).squeeze(0)  # (C, V)
                feat_sampled_i = feat_sampled_i.transpose(1, 0)  # (V, C)
                out_i_feats.append(feat_sampled_i)
            # Concatenate features from all maps along the channel dim.
            out_i_feats = torch.cat(out_i_feats, 1)  # (V, sum(C))
            out_feats.append(out_i_feats)
        if return_packed:
            out_feats = torch.cat(out_feats, 0)  # (sum(V), sum(C))
        else:
            out_feats = torch.stack(out_feats, 0)  # (N, V, sum(C))
        return out_feats

    @staticmethod
    def init_meshes(
        num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
    ):
        """Build a batch of random CUDA meshes with verts in [-1, 1]."""
        device = torch.device("cuda:0")
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = (
                torch.rand((num_verts, 3), dtype=torch.float32, device=device)
                * 2.0
                - 1.0
            )  # verts in the space of [-1, 1]
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)
        return meshes

    @staticmethod
    def init_feats(
        batch_size: int = 10, num_channels: int = 256, device: str = "cuda"
    ):
        """Return two random feature maps (14x14 and 28x28) as a list."""
        H, W = [14, 28], [14, 28]
        feats = []
        for (h, w) in zip(H, W):
            feats.append(
                torch.rand((batch_size, num_channels, h, w), device=device)
            )
        return feats

    def test_vert_align_with_meshes(self):
        """
        Test vert align vs naive implementation with meshes.
        """
        meshes = TestVertAlign.init_meshes(10, 1000, 3000)
        feats = TestVertAlign.init_feats(10, 256)
        # feats in list
        out = vert_align(feats, meshes, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats, meshes, return_packed=True
        )
        self.assertTrue(torch.allclose(out, naive_out))
        # feats as tensor
        out = vert_align(feats[0], meshes, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats[0], meshes, return_packed=True
        )
        self.assertTrue(torch.allclose(out, naive_out))

    def test_vert_align_with_verts(self):
        """
        Test vert align vs naive implementation with verts as tensor.
        """
        feats = TestVertAlign.init_feats(10, 256)
        verts = (
            torch.rand(
                (10, 100, 3), dtype=torch.float32, device=feats[0].device
            )
            * 2.0
            - 1.0
        )
        # feats in list
        out = vert_align(feats, verts, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats, verts, return_packed=True
        )
        self.assertTrue(torch.allclose(out, naive_out))
        # feats as tensor
        out = vert_align(feats[0], verts, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats[0], verts, return_packed=True
        )
        self.assertTrue(torch.allclose(out, naive_out))
        # align_corners=False must change the result but still match the
        # naive implementation.
        out2 = vert_align(
            feats[0], verts, return_packed=True, align_corners=False
        )
        naive_out2 = TestVertAlign.vert_align_naive(
            feats[0], verts, return_packed=True, align_corners=False
        )
        self.assertFalse(torch.allclose(out, out2))
        self.assertTrue(torch.allclose(out2, naive_out2))

    @staticmethod
    def vert_align_with_init(
        num_meshes: int, num_verts: int, num_faces: int, device: str = "cpu"
    ):
        """Build random meshes/feats and return a closure that runs
        vert_align on them — appears intended as a benchmark entry point.

        NOTE(review): torch.cuda.synchronize() is called unconditionally,
        even when device is "cpu" — confirm this is intended on CPU-only
        machines.
        """
        device = torch.device(device)
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = torch.rand(
                (num_verts, 3), dtype=torch.float32, device=device
            )
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)
        feats = TestVertAlign.init_feats(num_meshes, device=device)
        torch.cuda.synchronize()

        def sample_features():
            vert_align(feats, meshes, return_packed=True)
            torch.cuda.synchronize()

        return sample_features
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment