Commit b19fe1de authored by Christoph Lassner's avatar Christoph Lassner Committed by Facebook GitHub Bot
Browse files

pulsar integration.

Summary:
This diff integrates the pulsar renderer source code into PyTorch3D as an alternative backend for the PyTorch3D point renderer. This diff is the first of a series of three diffs to complete that migration and focuses on the packaging and integration of the source code.

For more information about the pulsar backend, see the release notes and the paper (https://arxiv.org/abs/2004.07484). For information on how to use the backend, see the point cloud rendering notebook and the examples in the folder `docs/examples`.

Tasks addressed in the following diffs:
* Add the PyTorch3D interface,
* Add notebook examples and documentation (or adapt the existing ones to feature both interfaces).

Reviewed By: nikhilaravi

Differential Revision: D23947736

fbshipit-source-id: a5e77b53e6750334db22aefa89b4c079cda1b443
parent d5650323
......@@ -43,3 +43,7 @@ def bm_mesh_rasterizer_transform() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_mesh_rasterizer_transform()
......@@ -33,3 +33,7 @@ def bm_compute_packed_padded_meshes() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_compute_packed_padded_meshes()
......@@ -38,3 +38,7 @@ def bm_packed_to_padded() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_packed_to_padded()
......@@ -23,3 +23,7 @@ def bm_perspective_n_points() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_perspective_n_points()
......@@ -34,3 +34,7 @@ def bm_point_mesh_distance() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_point_mesh_distance()
......@@ -28,3 +28,7 @@ def bm_compute_packed_padded_pointclouds() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_compute_packed_padded_pointclouds()
......@@ -69,3 +69,8 @@ def bm_corresponding_points_alignment() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_corresponding_points_alignment()
bm_iterative_closest_point()
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""Test render speed."""
import logging
import sys
from os import path
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d.renderer.points.pulsar import Renderer
from torch.autograd import Variable
# Making sure you can run this, even if pulsar hasn't been installed yet.
sys.path.insert(0, path.join(path.dirname(__file__), ".."))

# Module-level logger, named after this module per logging convention.
LOGGER = logging.getLogger(__name__)

# NOTE(review): the following bare string is NOT the module docstring (it does
# not open the file), so it is a no-op expression statement kept purely as
# inline documentation for the benchmarks defined below.
"""Measure the execution speed of the rendering.
This measures a very pessimistic upper bound on speed, because synchronization
points have to be introduced in Python. On a pure PyTorch execution pipeline,
results should be significantly faster. You can get pure CUDA timings through
C++ by activating `PULSAR_TIMINGS_BATCHED_ENABLED` in the file
`pytorch3d/csrc/pulsar/logging.h` or defining it for your compiler.
"""
def _bm_pulsar():
    """Build the closure for the pulsar forward-pass benchmark.

    Creates a renderer and a random scene of one million points on the GPU
    and returns a zero-argument closure that renders the scene once and
    synchronizes, so `fvcore.common.benchmark` measures real GPU work.

    Returns:
        Callable[[], None]: the benchmark closure.
    """
    n_points = 1_000_000
    width = 1_000
    height = 1_000
    renderer = Renderer(width, height, n_points)
    # Generate sample data (fixed seed for reproducible timings).
    torch.manual_seed(1)
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
    vert_pos[:, 2] += 25.0  # push the points in front of the camera
    vert_pos[:, :2] -= 5.0  # center them laterally
    vert_col = torch.rand(n_points, 3, dtype=torch.float32)
    vert_rad = torch.rand(n_points, dtype=torch.float32)
    # NOTE(review): presumably [x, y, z, rot..., focal, sensor]-style camera
    # parameters — confirm against the pulsar Renderer documentation.
    cam_params = torch.tensor(
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
    )
    device = torch.device("cuda")
    vert_pos = vert_pos.to(device)
    vert_col = vert_col.to(device)
    vert_rad = vert_rad.to(device)
    cam_params = cam_params.to(device)
    renderer = renderer.to(device)
    # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors
    # (requires_grad is False by default) are the exact equivalent here.

    def bm_closure():
        renderer.forward(
            vert_pos,
            vert_col,
            vert_rad,
            cam_params,
            1.0e-1,
            45.0,
            percent_allowed_difference=0.01,
        )
        # Synchronize so the timing includes the kernel execution, not just
        # the (asynchronous) launch.
        torch.cuda.synchronize()

    return bm_closure
def _bm_pulsar_backward():
    """Build the closure for the pulsar backward-pass benchmark.

    Creates a renderer and a random scene of one million points on the GPU,
    runs one forward pass with gradients enabled, and returns a closure that
    backpropagates through it. The graph is retained so the closure can be
    timed repeatedly.

    Returns:
        Callable[[], None]: the benchmark closure.
    """
    n_points = 1_000_000
    width = 1_000
    height = 1_000
    renderer = Renderer(width, height, n_points)
    # Generate sample data (fixed seed for reproducible timings).
    torch.manual_seed(1)
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
    vert_pos[:, 2] += 25.0  # push the points in front of the camera
    vert_pos[:, :2] -= 5.0  # center them laterally
    vert_col = torch.rand(n_points, 3, dtype=torch.float32)
    vert_rad = torch.rand(n_points, dtype=torch.float32)
    # NOTE(review): presumably [x, y, z, rot..., focal, sensor]-style camera
    # parameters — confirm against the pulsar Renderer documentation.
    cam_params = torch.tensor(
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
    )
    device = torch.device("cuda")
    vert_pos = vert_pos.to(device)
    vert_col = vert_col.to(device)
    vert_rad = vert_rad.to(device)
    cam_params = cam_params.to(device)
    renderer = renderer.to(device)
    # torch.autograd.Variable is deprecated since PyTorch 0.4; enabling
    # gradients directly on the (leaf) tensors is the modern equivalent.
    vert_pos.requires_grad_(True)
    vert_col.requires_grad_(True)
    vert_rad.requires_grad_(True)
    cam_params.requires_grad_(True)
    res = renderer.forward(
        vert_pos,
        vert_col,
        vert_rad,
        cam_params,
        1.0e-1,
        45.0,
        percent_allowed_difference=0.01,
    )
    loss = res.sum()

    def bm_closure():
        # retain_graph=True: the same graph is reused on every iteration.
        loss.backward(retain_graph=True)
        # Synchronize so the timing includes the actual GPU work.
        torch.cuda.synchronize()

    return bm_closure
def bm_pulsar() -> None:
    """Run the pulsar forward and backward benchmarks (CUDA only)."""
    # The pulsar renderer requires a GPU; skip silently on CPU-only hosts.
    if not torch.cuda.is_available():
        return
    for closure_factory, label in (
        (_bm_pulsar, "PULSAR_FORWARD"),
        (_bm_pulsar_backward, "PULSAR_BACKWARD"),
    ):
        benchmark(closure_factory, label, [{}], warmup_iters=3)
# Allow running this benchmark file directly as a script.
if __name__ == "__main__":
    bm_pulsar()
......@@ -85,3 +85,7 @@ def bm_rasterize_meshes() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_rasterize_meshes()
......@@ -80,3 +80,7 @@ def bm_python_vs_cpu_vs_cuda() -> None:
benchmark(
_bm_rasterize_points_with_init, "RASTERIZE_CUDA", kwargs_list, warmup_iters=1
)
if __name__ == "__main__":
bm_python_vs_cpu_vs_cuda()
......@@ -36,3 +36,7 @@ def bm_sample_points() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_sample_points()
......@@ -13,3 +13,7 @@ def bm_so3() -> None:
]
benchmark(TestSO3.so3_expmap, "SO3_EXP", kwargs_list, warmup_iters=1)
benchmark(TestSO3.so3_logmap, "SO3_LOG", kwargs_list, warmup_iters=1)
if __name__ == "__main__":
bm_so3()
......@@ -21,3 +21,7 @@ def bm_subdivide() -> None:
kwargs_list,
warmup_iters=1,
)
if __name__ == "__main__":
bm_subdivide()
......@@ -27,3 +27,7 @@ def bm_vert_align() -> None:
benchmark(
TestVertAlign.vert_align_with_init, "VERT_ALIGN", kwargs_list, warmup_iters=1
)
if __name__ == "__main__":
bm_vert_align()
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""Create multiview data."""
import sys
from os import path
# Making sure you can run this, even if pulsar hasn't been installed yet.
sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
def create_multiview():
    """Create the reference multiview images for the example scene.

    Renders a small random point cloud from eight camera positions on an
    arc and writes each rendering to
    ``reference/examples_TestRenderer_test_multiview_<idx>.png``.
    Requires a CUDA device.
    """
    # Imports are kept local so this module can be imported (for the
    # sys.path setup above) even when pulsar/imageio are not installed.
    from pytorch3d.renderer.points.pulsar import Renderer
    import torch
    from torch import nn
    import imageio
    import numpy as np

    # Scene and image dimensions.
    n_points = 10
    width = 1000
    height = 1000

    class Model(nn.Module):
        """A dummy model to test the integration into a stacked model."""

        def __init__(self):
            super().__init__()  # zero-arg super (Python 3 idiom)
            # Blending parameter passed to the renderer on every forward.
            self.gamma = 0.1
            self.renderer = Renderer(width, height, n_points)

        def forward(self, vp, vc, vr, cam_params):
            # NOTE(review): 45.0 appears to be the maximum scene depth —
            # confirm against the Renderer.forward documentation.
            return self.renderer.forward(vp, vc, vr, cam_params, self.gamma, 45.0)

    # Generate reproducible sample data.
    torch.manual_seed(1)
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
    vert_pos[:, 2] += 25.0  # push the points in front of the camera
    vert_pos[:, :2] -= 5.0  # center them laterally
    vert_col = torch.rand(n_points, 3, dtype=torch.float32)
    vert_rad = torch.rand(n_points, dtype=torch.float32)
    # Darken the colors for the reference images.
    vert_col *= 0.5
    for device in [torch.device("cuda")]:
        model = Model().to(device)
        vert_pos = vert_pos.to(device)
        vert_col = vert_col.to(device)
        vert_rad = vert_rad.to(device)
        for angle_idx, angle in enumerate(
            [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
        ):
            # Camera position moves on an arc of radius 35 around
            # (0, 0, 30); -angle rotates it back toward the scene center.
            # NOTE(review): cam_params looks like
            # [x, y, z, rx, ry, rz, focal, sensor] — confirm against the
            # Renderer documentation.
            cam_params = torch.tensor(
                [
                    np.sin(angle) * 35.0,
                    0.0,
                    30.0 - np.cos(angle) * 35.0,
                    0.0,
                    -angle,
                    0.0,
                    5.0,
                    2.0,
                ],
                dtype=torch.float32,
            ).to(device)
            # torch.autograd.Variable is deprecated since PyTorch 0.4;
            # plain tensors (requires_grad=False by default) are equivalent.
            result = model.forward(vert_pos, vert_col, vert_rad, cam_params)
            result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
            imageio.imsave(
                "reference/examples_TestRenderer_test_multiview_%d.png"
                % (angle_idx),
                result_im,
            )
# Allow running this example file directly as a script.
if __name__ == "__main__":
    create_multiview()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment