Commit e13e63a8 authored by Shubham Goel, committed by Facebook GitHub Bot

bugfix in cotcurv laplacian loss. closes #551 (#553)

Summary: Pull Request resolved: https://github.com/facebookresearch/pytorch3d/pull/553

Reviewed By: theschnitz

Differential Revision: D26257591

Pulled By: gkioxari

fbshipit-source-id: 899a3f733a77361e8572b0900a34b55764ff08f2
parent 17468e28
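
In brief: the `cotcurv` branch previously subtracted `verts_packed` directly, which is only correct when the Laplacian rows are normalized. The fix subtracts `L_sum * verts_packed`, where `L_sum` holds the row sums of the cotangent weights, so the per-vertex term becomes sum_j w_ij * v_j - (sum_j w_ij) * v_i, scaled by 0.25 * inv_areas. Below is a minimal dense reference sketch of the corrected computation; the names `L_dense`, `inv_areas`, and `verts` are placeholders for this sketch (not the library's internals) and it mirrors the updated test further down.

```python
import torch

def cotcurv_reference(L_dense: torch.Tensor, inv_areas: torch.Tensor,
                      verts: torch.Tensor) -> torch.Tensor:
    # L_dense: (V, V) cotangent weights, inv_areas: (V, 1), verts: (V, 3).
    L_sum = L_dense.sum(dim=1, keepdim=True)  # sum_j w_ij for each vertex i
    # Corrected term: sum_j w_ij * v_j - (sum_j w_ij) * v_i, scaled by 1 / (4 A_i).
    loss = (L_dense @ verts - L_sum * verts) * (0.25 * inv_areas)
    return loss.norm(dim=1)  # per-vertex magnitude
```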
@@ -108,6 +108,7 @@ def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
             idx = norm_w > 0
             norm_w[idx] = 1.0 / norm_w[idx]
         else:
+            L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
             norm_w = 0.25 * inv_areas
     else:
         raise ValueError("Method should be one of {uniform, cot, cotcurv}")
@@ -117,7 +118,7 @@ def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
     elif method == "cot":
         loss = L.mm(verts_packed) * norm_w - verts_packed
     elif method == "cotcurv":
-        loss = (L.mm(verts_packed) - verts_packed) * norm_w
+        loss = (L.mm(verts_packed) - L_sum * verts_packed) * norm_w
     loss = loss.norm(dim=1)
     loss = loss * weights
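
For context, a minimal usage sketch of the fixed code path (this assumes the standard `ico_sphere` utility and is not part of this diff):

```python
from pytorch3d.loss import mesh_laplacian_smoothing
from pytorch3d.utils import ico_sphere

meshes = ico_sphere(level=2)  # one icosphere wrapped in a Meshes object
# Scalar smoothing loss; "cotcurv" now applies the curvature-correct row sums.
loss = mesh_laplacian_smoothing(meshes, method="cotcurv")
```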
@@ -89,11 +89,12 @@ class TestLaplacianSmoothing(unittest.TestCase):
         inv_areas[idx] = 1.0 / inv_areas[idx]
         norm_w = L.sum(dim=1, keepdims=True)
+        L_sum = norm_w.clone()
         idx = norm_w > 0
         norm_w[idx] = 1.0 / norm_w[idx]
         if method == "cotcurv":
-            loss = (L.mm(verts_packed) - verts_packed) * inv_areas * 0.25
+            loss = (L.mm(verts_packed) - L_sum * verts_packed) * inv_areas * 0.25
             loss = loss.norm(dim=1)
         else:
             loss = L.mm(verts_packed) * norm_w - verts_packed
@@ -147,7 +148,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
     def test_laplacian_smoothing_cot(self):
         """
-        Test Laplacian Smoothing with uniform weights.
+        Test Laplacian Smoothing with cot weights.
         """
         meshes = TestLaplacianSmoothing.init_meshes(10, 100, 300)
@@ -161,7 +162,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
     def test_laplacian_smoothing_cotcurv(self):
         """
-        Test Laplacian Smoothing with uniform weights.
+        Test Laplacian Smoothing with cotcurv weights.
         """
         meshes = TestLaplacianSmoothing.init_meshes(10, 100, 300)
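
A small sanity sketch (an illustration under assumed toy data, not from the commit): the sparse row sum used in the library path, `torch.sparse.sum(L, dim=1)`, matches the dense row sum used by the test reference, so both compute sum_j w_ij.

```python
import torch

indices = torch.tensor([[0, 0, 1, 2], [1, 2, 0, 0]])  # toy COO Laplacian
values = torch.tensor([0.5, 0.5, 0.5, 0.5])
L = torch.sparse_coo_tensor(indices, values, (3, 3))

sparse_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)  # library path
dense_sum = L.to_dense().sum(dim=1, keepdim=True)               # test reference path
assert torch.allclose(sparse_sum, dense_sum)
```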