Commit 3d7dea58 authored by Georgia Gkioxari's avatar Georgia Gkioxari Committed by Facebook GitHub Bot
Browse files

remove unused params + cubify note

Summary:
This diff
* removes the unused compositing params
* adds a note describing cubify

Reviewed By: nikhilaravi

Differential Revision: D22426191

fbshipit-source-id: e8aa32040bb594e1dfd7d6d98e29264feefcec7c
parent 38eadb75
# Cubify
The [cubify operator](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/ops/cubify.py) converts a 3D occupancy grid of shape `BxDxHxW`, where `B` is the batch size, into a mesh instantiated as a [Meshes](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/structures/meshes.py) data structure of `B` elements. The operator replaces every occupied voxel (if its occupancy probability is greater than a user-defined threshold) with a cuboid of 12 faces and 8 vertices. Shared vertices are merged, and internal faces are removed, resulting in a **watertight** mesh.
The operator provides three alignment modes {*topleft*, *corner*, *center*} which define the span of the mesh vertices with respect to the voxel grid. The alignment modes are described in the figure below for a 2D grid.
![input](https://user-images.githubusercontent.com/4369065/81032959-af697380-8e46-11ea-91a8-fae89597f988.png)
...@@ -14,11 +14,6 @@ from pytorch3d import _C ...@@ -14,11 +14,6 @@ from pytorch3d import _C
# This can be an image (C=3) or a set of features. # This can be an image (C=3) or a set of features.
# Data class to store blending params with defaults
class CompositeParams(NamedTuple):
radius: float = 4.0 / 256.0
class _CompositeAlphaPoints(torch.autograd.Function): class _CompositeAlphaPoints(torch.autograd.Function):
""" """
Composite features within a z-buffer using alpha compositing. Given a z-buffer Composite features within a z-buffer using alpha compositing. Given a z-buffer
...@@ -67,7 +62,7 @@ class _CompositeAlphaPoints(torch.autograd.Function): ...@@ -67,7 +62,7 @@ class _CompositeAlphaPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None return grad_features, grad_alphas, grad_points_idx, None
def alpha_composite(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor: def alpha_composite(pointsidx, alphas, pt_clds) -> torch.Tensor:
""" """
Composite features within a z-buffer using alpha compositing. Given a z-buffer Composite features within a z-buffer using alpha compositing. Given a z-buffer
with corresponding features and weights, these values are accumulated according with corresponding features and weights, these values are accumulated according
...@@ -147,7 +142,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function): ...@@ -147,7 +142,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None return grad_features, grad_alphas, grad_points_idx, None
def norm_weighted_sum(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor: def norm_weighted_sum(pointsidx, alphas, pt_clds) -> torch.Tensor:
""" """
Composite features within a z-buffer using normalized weighted sum. Given a z-buffer Composite features within a z-buffer using normalized weighted sum. Given a z-buffer
with corresponding features and weights, these values are accumulated with corresponding features and weights, these values are accumulated
...@@ -226,7 +221,7 @@ class _CompositeWeightedSumPoints(torch.autograd.Function): ...@@ -226,7 +221,7 @@ class _CompositeWeightedSumPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None return grad_features, grad_alphas, grad_points_idx, None
def weighted_sum(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor: def weighted_sum(pointsidx, alphas, pt_clds) -> torch.Tensor:
""" """
Composite features within a z-buffer using normalized weighted sum. Composite features within a z-buffer using normalized weighted sum.
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
import torch import torch
import torch.nn as nn import torch.nn as nn
from ..compositing import CompositeParams, alpha_composite, norm_weighted_sum from ..compositing import alpha_composite, norm_weighted_sum
# A compositor should take as input 3D points and some corresponding information. # A compositor should take as input 3D points and some corresponding information.
...@@ -16,15 +16,11 @@ class AlphaCompositor(nn.Module): ...@@ -16,15 +16,11 @@ class AlphaCompositor(nn.Module):
Accumulate points using alpha compositing. Accumulate points using alpha compositing.
""" """
def __init__(self, composite_params=None): def __init__(self):
super().__init__() super().__init__()
self.composite_params = (
composite_params if composite_params is not None else CompositeParams()
)
def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = alpha_composite(fragments, alphas, ptclds, self.composite_params) images = alpha_composite(fragments, alphas, ptclds)
return images return images
...@@ -33,12 +29,9 @@ class NormWeightedCompositor(nn.Module): ...@@ -33,12 +29,9 @@ class NormWeightedCompositor(nn.Module):
Accumulate points using a normalized weighted sum. Accumulate points using a normalized weighted sum.
""" """
def __init__(self, composite_params=None): def __init__(self):
super().__init__() super().__init__()
self.composite_params = (
composite_params if composite_params is not None else CompositeParams()
)
def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = norm_weighted_sum(fragments, alphas, ptclds, self.composite_params) images = norm_weighted_sum(fragments, alphas, ptclds)
return images return images
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment