Commit d19e6243 authored by Jiali Duan's avatar Jiali Duan Committed by Facebook GitHub Bot
Browse files

Update Rasterizer and add end2end fisheye integration test

Summary:
1) Update rasterizer/point rasterizer to accommodate fisheyecamera. Specifically, transform_points is used in place of explicit transform compositions.

2) In rasterizer unittests, update corresponding tests for rasterizer and point_rasterizer. Address comments to test fisheye against perspective camera when distortions are turned off.

3) Address comments to add end2end test for fisheyecameras. In test_render_meshes, fisheyecameras are added to camera enumerations whenever possible.

4) Test renderings with fisheyecameras of different params on cow mesh.

5) Use compositions for linear cameras whenever possible.

Reviewed By: kjchalup

Differential Revision: D38932736

fbshipit-source-id: 5b7074fc001f2390f4cf43c7267a8b37fd987547
parent b0515e14
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
import math import math
import warnings import warnings
from typing import List, Optional, Sequence, Tuple, Union from typing import Callable, List, Optional, Sequence, Tuple, Union
import numpy as np import numpy as np
import torch import torch
...@@ -91,7 +91,7 @@ class CamerasBase(TensorProperties): ...@@ -91,7 +91,7 @@ class CamerasBase(TensorProperties):
# When joining objects into a batch, they will have to agree. # When joining objects into a batch, they will have to agree.
_SHARED_FIELDS: Tuple[str, ...] = () _SHARED_FIELDS: Tuple[str, ...] = ()
def get_projection_transform(self): def get_projection_transform(self, **kwargs):
""" """
Calculate the projective transformation matrix. Calculate the projective transformation matrix.
...@@ -1841,3 +1841,23 @@ def get_screen_to_ndc_transform( ...@@ -1841,3 +1841,23 @@ def get_screen_to_ndc_transform(
image_size=image_size, image_size=image_size,
).inverse() ).inverse()
return transform return transform
def try_get_projection_transform(cameras, kwargs) -> Optional[Callable]:
    """
    Try to get the projection transform from the camera.

    Nonlinear cameras (e.g. fisheye) may not implement a matrix-form
    projection and raise ``NotImplementedError`` from
    ``get_projection_transform``; in that case ``None`` is returned so
    callers can fall back to ``cameras.transform_points`` instead of
    composing explicit transforms.

    Args:
        cameras: camera instance; can be a linear or a nonlinear camera.
        kwargs: dict of keyword arguments forwarded to
            ``cameras.get_projection_transform``.

    Returns:
        The projection transform if the camera implements
        ``get_projection_transform``; otherwise ``None``.
    """
    transform = None
    try:
        transform = cameras.get_projection_transform(**kwargs)
    except NotImplementedError:
        # Cameras without a matrix-form projection signal this by
        # raising NotImplementedError; treat it as "no transform".
        pass
    return transform
...@@ -9,6 +9,7 @@ from typing import Optional, Tuple, Union ...@@ -9,6 +9,7 @@ from typing import Optional, Tuple, Union
import torch import torch
import torch.nn as nn import torch.nn as nn
from pytorch3d.renderer.cameras import try_get_projection_transform
from .rasterize_meshes import rasterize_meshes from .rasterize_meshes import rasterize_meshes
...@@ -197,12 +198,19 @@ class MeshRasterizer(nn.Module): ...@@ -197,12 +198,19 @@ class MeshRasterizer(nn.Module):
verts_view = cameras.get_world_to_view_transform(**kwargs).transform_points( verts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
verts_world, eps=eps verts_world, eps=eps
) )
# view to NDC transform # Call transform_points instead of explicitly composing transforms to handle
# the case, where camera class does not have a projection matrix form.
verts_proj = cameras.transform_points(verts_world, eps=eps)
to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs) to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs)
projection_transform = cameras.get_projection_transform(**kwargs).compose( projection_transform = try_get_projection_transform(cameras, kwargs)
to_ndc_transform if projection_transform is not None:
) projection_transform = projection_transform.compose(to_ndc_transform)
verts_ndc = projection_transform.transform_points(verts_view, eps=eps) verts_ndc = projection_transform.transform_points(verts_view, eps=eps)
else:
# Call transform_points instead of explicitly composing transforms to handle
# the case, where camera class does not have a projection matrix form.
verts_proj = cameras.transform_points(verts_world, eps=eps)
verts_ndc = to_ndc_transform.transform_points(verts_proj, eps=eps)
verts_ndc[..., 2] = verts_view[..., 2] verts_ndc[..., 2] = verts_view[..., 2]
meshes_ndc = meshes_world.update_padded(new_verts_padded=verts_ndc) meshes_ndc = meshes_world.update_padded(new_verts_padded=verts_ndc)
......
...@@ -10,6 +10,7 @@ from typing import NamedTuple, Optional, Tuple, Union ...@@ -10,6 +10,7 @@ from typing import NamedTuple, Optional, Tuple, Union
import torch import torch
import torch.nn as nn import torch.nn as nn
from pytorch3d.renderer.cameras import try_get_projection_transform
from pytorch3d.structures import Pointclouds from pytorch3d.structures import Pointclouds
from .rasterize_points import rasterize_points from .rasterize_points import rasterize_points
...@@ -103,12 +104,16 @@ class PointsRasterizer(nn.Module): ...@@ -103,12 +104,16 @@ class PointsRasterizer(nn.Module):
pts_view = cameras.get_world_to_view_transform(**kwargs).transform_points( pts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
pts_world, eps=eps pts_world, eps=eps
) )
# view to NDC transform
to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs) to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs)
projection_transform = cameras.get_projection_transform(**kwargs).compose( projection_transform = try_get_projection_transform(cameras, kwargs)
to_ndc_transform if projection_transform is not None:
) projection_transform = projection_transform.compose(to_ndc_transform)
pts_ndc = projection_transform.transform_points(pts_view, eps=eps) pts_ndc = projection_transform.transform_points(pts_view, eps=eps)
else:
# Call transform_points instead of explicitly composing transforms to handle
# the case, where camera class does not have a projection matrix form.
pts_proj = cameras.transform_points(pts_world, eps=eps)
pts_ndc = to_ndc_transform.transform_points(pts_proj, eps=eps)
pts_ndc[..., 2] = pts_view[..., 2] pts_ndc[..., 2] = pts_view[..., 2]
point_clouds = point_clouds.update_padded(pts_ndc) point_clouds = point_clouds.update_padded(pts_ndc)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment