Commit 57a22e73 authored by Georgia Gkioxari, committed by Facebook GitHub Bot

camera refactoring

Summary:
Refactor cameras:
* `CamerasBase` gains `transform_points_screen`, which transforms projected points from NDC to screen space
* `OpenGLPerspectiveCameras`, `OpenGLOrthographicCameras` -> `FoVPerspectiveCameras`, `FoVOrthographicCameras`
* `SfMPerspectiveCameras`, `SfMOrthographicCameras` -> `PerspectiveCameras`, `OrthographicCameras`
* `PerspectiveCameras` can optionally be constructed with screen-space parameters (focal length and principal point in pixels, plus the image size)
* A note on cameras and coordinate systems was added
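
For illustration only (not part of this commit): a minimal sketch of the renamed API and the new screen-space option. The constructor argument shapes are taken from the new test below; the `transform_points_screen` call is an assumption based on the summary above.

```python
import torch
from pytorch3d.renderer import (
    FoVPerspectiveCameras,  # formerly OpenGLPerspectiveCameras
    PerspectiveCameras,     # formerly SfMPerspectiveCameras
    look_at_view_transform,
)

R, T = look_at_view_transform(dist=2.7, elev=0.0, azim=0.0)

# FoV cameras keep the view-frustum parametrization.
fov_cameras = FoVPerspectiveCameras(R=R, T=T)

# PerspectiveCameras can now optionally be constructed with
# screen-space parameters: focal length and principal point in
# pixels, together with the image size.
screen_cameras = PerspectiveCameras(
    R=R,
    T=T,
    focal_length=((256.0, 256.0),),
    principal_point=((256.0, 256.0),),
    image_size=((512, 512),),
)

# transform_points_screen maps projected points from NDC to screen
# space (signature assumed: a points tensor plus an image size).
points = torch.rand(1, 8, 3)
points_screen = fov_cameras.transform_points_screen(
    points, image_size=((512, 512),)
)
```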

Reviewed By: nikhilaravi

Differential Revision: D23168525

fbshipit-source-id: dd138e2b2cc7e0e0d9f34c45b8251c01266a2063
parent 9242e7e6
@@ -18,7 +18,7 @@ from pytorch3d.datasets import (
     render_cubified_voxels,
 )
 from pytorch3d.renderer import (
-    OpenGLPerspectiveCameras,
+    FoVPerspectiveCameras,
     PointLights,
     RasterizationSettings,
     look_at_view_transform,
@@ -211,7 +211,7 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
         # Render first three models in the dataset.
         R, T = look_at_view_transform(1.0, 1.0, 90)
-        cameras = OpenGLPerspectiveCameras(R=R, T=T, device=device)
+        cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
         raster_settings = RasterizationSettings(image_size=512)
         lights = PointLights(
             location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
...
@@ -7,7 +7,7 @@ from pathlib import Path
 import numpy as np
 import torch
 from PIL import Image
-from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
+from pytorch3d.renderer.cameras import FoVPerspectiveCameras, look_at_view_transform
 from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
 from pytorch3d.renderer.points.rasterizer import (
     PointsRasterizationSettings,
@@ -43,7 +43,7 @@ class TestMeshRasterizer(unittest.TestCase):
         # Init rasterizer settings
         R, T = look_at_view_transform(2.7, 0, 0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = RasterizationSettings(
             image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
         )
@@ -148,7 +148,7 @@ class TestPointRasterizer(unittest.TestCase):
         verts_padded[..., 0] += 0.2
         pointclouds = Pointclouds(points=verts_padded)
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = PointsRasterizationSettings(
             image_size=256, radius=5e-2, points_per_pixel=1
         )
...
@@ -4,6 +4,7 @@
 """
 Sanity checks for output images from the renderer.
 """
+import os
 import unittest
 from pathlib import Path
@@ -12,7 +13,13 @@ import torch
 from common_testing import TestCaseMixin, load_rgb_image
 from PIL import Image
 from pytorch3d.io import load_obj
-from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
+from pytorch3d.renderer.cameras import (
+    FoVOrthographicCameras,
+    FoVPerspectiveCameras,
+    OrthographicCameras,
+    PerspectiveCameras,
+    look_at_view_transform,
+)
 from pytorch3d.renderer.lighting import PointLights
 from pytorch3d.renderer.materials import Materials
 from pytorch3d.renderer.mesh import TexturesAtlas, TexturesUV, TexturesVertex
@@ -60,14 +67,20 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         if elevated_camera:
             # Elevated and rotated camera
             R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
-            postfix = "_elevated_camera"
+            postfix = "_elevated_"
             # If y axis is up, the spot of light should
             # be on the bottom left of the sphere.
         else:
             # No elevation or azimuth rotation
             R, T = look_at_view_transform(2.7, 0.0, 0.0)
-            postfix = ""
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+            postfix = "_"
+        for cam_type in (
+            FoVPerspectiveCameras,
+            FoVOrthographicCameras,
+            PerspectiveCameras,
+            OrthographicCameras,
+        ):
+            cameras = cam_type(device=device, R=R, T=T)

             # Init shader settings
             materials = Materials(device=device)
@@ -77,7 +90,9 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
             raster_settings = RasterizationSettings(
                 image_size=512, blur_radius=0.0, faces_per_pixel=1
             )
-            rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
+            rasterizer = MeshRasterizer(
+                cameras=cameras, raster_settings=raster_settings
+            )
             blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

             # Test several shaders
@@ -95,16 +110,21 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
                 )
                 renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                 images = renderer(sphere_mesh)
-                filename = "simple_sphere_light_%s%s.png" % (name, postfix)
-                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                 rgb = images[0, ..., :3].squeeze().cpu()
+                filename = "simple_sphere_light_%s%s%s.png" % (
+                    name,
+                    postfix,
+                    cam_type.__name__,
+                )
+                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
+                self.assertClose(rgb, image_ref, atol=0.05)

                 if DEBUG:
                     filename = "DEBUG_%s" % filename
                     Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                         DATA_DIR / filename
                     )
-                self.assertClose(rgb, image_ref, atol=0.05)

             ########################################################
             # Move the light to the +z axis in world space so it is
@@ -122,14 +142,17 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
             images = phong_renderer(sphere_mesh, lights=lights)
             rgb = images[0, ..., :3].squeeze().cpu()
             if DEBUG:
-                filename = "DEBUG_simple_sphere_dark%s.png" % postfix
+                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
+                    postfix,
+                    cam_type.__name__,
+                )
                 Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                     DATA_DIR / filename
                 )
-            # Load reference image
             image_ref_phong_dark = load_rgb_image(
-                "test_simple_sphere_dark%s.png" % postfix, DATA_DIR
+                "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
+                DATA_DIR,
             )
             self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
@@ -142,6 +165,60 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         """
         self.test_simple_sphere(elevated_camera=True)

+    def test_simple_sphere_screen(self):
+        """
+        Test output when rendering with PerspectiveCameras & OrthographicCameras
+        in NDC vs screen space.
+        """
+        device = torch.device("cuda:0")
+
+        # Init mesh
+        sphere_mesh = ico_sphere(5, device)
+        verts_padded = sphere_mesh.verts_padded()
+        faces_padded = sphere_mesh.faces_padded()
+        feats = torch.ones_like(verts_padded, device=device)
+        textures = TexturesVertex(verts_features=feats)
+        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)
+
+        R, T = look_at_view_transform(2.7, 0.0, 0.0)
+
+        # Init shader settings
+        materials = Materials(device=device)
+        lights = PointLights(device=device)
+        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
+
+        raster_settings = RasterizationSettings(
+            image_size=512, blur_radius=0.0, faces_per_pixel=1
+        )
+        for cam_type in (PerspectiveCameras, OrthographicCameras):
+            cameras = cam_type(
+                device=device,
+                R=R,
+                T=T,
+                principal_point=((256.0, 256.0),),
+                focal_length=((256.0, 256.0),),
+                image_size=((512, 512),),
+            )
+            rasterizer = MeshRasterizer(
+                cameras=cameras, raster_settings=raster_settings
+            )
+            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))
+            shader = HardPhongShader(
+                lights=lights,
+                cameras=cameras,
+                materials=materials,
+                blend_params=blend_params,
+            )
+            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
+            images = renderer(sphere_mesh)
+            rgb = images[0, ..., :3].squeeze().cpu()
+            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__
+            image_ref = load_rgb_image(filename, DATA_DIR)
+            self.assertClose(rgb, image_ref, atol=0.05)
+
     def test_simple_sphere_batched(self):
         """
         Test a mesh with vertex textures can be extended to form a batch, and
@@ -165,7 +242,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         elev = torch.zeros_like(dist)
         azim = torch.zeros_like(dist)
         R, T = look_at_view_transform(dist, elev, azim)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = RasterizationSettings(
             image_size=512, blur_radius=0.0, faces_per_pixel=1
         )
@@ -193,12 +270,16 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
         images = renderer(sphere_meshes)
         image_ref = load_rgb_image(
-            "test_simple_sphere_light_%s.png" % name, DATA_DIR
+            "test_simple_sphere_light_%s_%s.png" % (name, type(cameras).__name__),
+            DATA_DIR,
         )
         for i in range(batch_size):
             rgb = images[i, ..., :3].squeeze().cpu()
             if i == 0 and DEBUG:
-                filename = "DEBUG_simple_sphere_batched_%s.png" % name
+                filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
+                    name,
+                    type(cameras).__name__,
+                )
                 Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                     DATA_DIR / filename
                 )
@@ -209,8 +290,6 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         Test silhouette blending. Also check that gradient calculation works.
         """
         device = torch.device("cuda:0")
-        ref_filename = "test_silhouette.png"
-        image_ref_filename = DATA_DIR / ref_filename
         sphere_mesh = ico_sphere(5, device)
         verts, faces = sphere_mesh.get_mesh_verts_faces(0)
         sphere_mesh = Meshes(verts=[verts], faces=[faces])
@@ -225,20 +304,33 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         # Init rasterizer settings
         R, T = look_at_view_transform(2.7, 0, 0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        for cam_type in (
+            FoVPerspectiveCameras,
+            FoVOrthographicCameras,
+            PerspectiveCameras,
+            OrthographicCameras,
+        ):
+            cameras = cam_type(device=device, R=R, T=T)

             # Init renderer
             renderer = MeshRenderer(
-                rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
+                rasterizer=MeshRasterizer(
+                    cameras=cameras, raster_settings=raster_settings
+                ),
                 shader=SoftSilhouetteShader(blend_params=blend_params),
             )
             images = renderer(sphere_mesh)
             alpha = images[0, ..., 3].squeeze().cpu()
             if DEBUG:
-                Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
-                    DATA_DIR / "DEBUG_silhouette.png"
+                filename = os.path.join(
+                    DATA_DIR, "DEBUG_%s_silhouette.png" % (cam_type.__name__)
+                )
+                Image.fromarray((alpha.detach().numpy() * 255).astype(np.uint8)).save(
+                    filename
                 )
+
+            ref_filename = "test_%s_silhouette.png" % (cam_type.__name__)
+            image_ref_filename = DATA_DIR / ref_filename
             with Image.open(image_ref_filename) as raw_image_ref:
                 image_ref = torch.from_numpy(np.array(raw_image_ref))
@@ -274,7 +366,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         # Init rasterizer settings
         R, T = look_at_view_transform(2.7, 0, 0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = RasterizationSettings(
             image_size=512, blur_radius=0.0, faces_per_pixel=1
@@ -337,7 +429,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         ##########################################
         R, T = look_at_view_transform(2.7, 0, 180)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

         # Move light to the front of the cow in world space
         lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
@@ -367,7 +459,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         # Add blurring to rasterization
         #################################
         R, T = look_at_view_transform(2.7, 0, 180)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
         raster_settings = RasterizationSettings(
             image_size=512,
@@ -429,7 +521,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         # Init rasterizer settings
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = RasterizationSettings(
             image_size=512, blur_radius=0.0, faces_per_pixel=1
         )
@@ -490,7 +582,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         # Init rasterizer settings
         R, T = look_at_view_transform(2.7, 0, 0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = RasterizationSettings(
             image_size=512, blur_radius=0.0, faces_per_pixel=1, cull_backfaces=True
...
@@ -14,8 +14,8 @@ import torch
 from common_testing import TestCaseMixin, load_rgb_image
 from PIL import Image
 from pytorch3d.renderer.cameras import (
-    OpenGLOrthographicCameras,
-    OpenGLPerspectiveCameras,
+    FoVOrthographicCameras,
+    FoVPerspectiveCameras,
     look_at_view_transform,
 )
 from pytorch3d.renderer.points import (
@@ -47,7 +47,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
             points=verts_padded, features=torch.ones_like(verts_padded)
         )
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
         raster_settings = PointsRasterizationSettings(
             image_size=256, radius=5e-2, points_per_pixel=1
         )
@@ -97,7 +97,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
         point_cloud = Pointclouds(points=[verts], features=[rgb_feats])
         R, T = look_at_view_transform(20, 10, 0)
-        cameras = OpenGLOrthographicCameras(device=device, R=R, T=T, znear=0.01)
+        cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
         raster_settings = PointsRasterizationSettings(
             # Set image_size so it is not a multiple of 16 (min bin_size)
@@ -150,7 +150,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
         batch_size = 20
         pointclouds = pointclouds.extend(batch_size)
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
-        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
+        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
             image_size=256, radius=5e-2, points_per_pixel=1
         )
...
@@ -12,7 +12,7 @@ from common_testing import TestCaseMixin, load_rgb_image
 from PIL import Image
 from pytorch3d.datasets import ShapeNetCore, collate_batched_meshes
 from pytorch3d.renderer import (
-    OpenGLPerspectiveCameras,
+    FoVPerspectiveCameras,
     PointLights,
     RasterizationSettings,
     look_at_view_transform,
@@ -174,7 +174,7 @@ class TestShapenetCore(TestCaseMixin, unittest.TestCase):
         # Rendering settings.
         R, T = look_at_view_transform(1.0, 1.0, 90)
-        cameras = OpenGLPerspectiveCameras(R=R, T=T, device=device)
+        cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
         raster_settings = RasterizationSettings(image_size=512)
         lights = PointLights(
             location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
...