Commit d57daa6f authored by Patrick Labatut, committed by Facebook GitHub Bot

Address black + isort fbsource linter warnings

Summary: Address black + isort fbsource linter warnings from D20558374 (previous diff)

Reviewed By: nikhilaravi

Differential Revision: D20558373

fbshipit-source-id: d3607de4a01fb24c0d5269634563a7914bddf1c8
parent eb512ffd
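
For context: the collapses from multi-line calls to single lines throughout this diff are standard black output, assuming its default 88-character line limit. A minimal sketch of the same reformatting through black's Python API (illustrative only, not part of this commit; `black.format_str` and `black.FileMode` are black's public API):

```python
# Illustrative sketch: reproduce the reformatting of the bm_mesh_edge_loss
# hunk below. Requires `pip install black`; output may vary across versions.
import black

src = (
    "benchmark(\n"
    "    TestMeshEdgeLoss.mesh_edge_loss,\n"
    '    "MESH_EDGE_LOSS",\n'
    "    kwargs_list,\n"
    "    warmup_iters=1\n"
    ")\n"
)

# FileMode() defaults to an 88-character line length.
print(black.format_str(src, mode=black.FileMode()))
# benchmark(
#     TestMeshEdgeLoss.mesh_edge_loss, "MESH_EDGE_LOSS", kwargs_list, warmup_iters=1
# )
```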
@@ -5,6 +5,7 @@ import glob
 import importlib
 from os.path import basename, dirname, isfile, join, sys
 
+
 if __name__ == "__main__":
     # pyre-ignore[16]
     if len(sys.argv) > 1:
@@ -25,7 +26,5 @@ if __name__ == "__main__":
         for attr in dir(module):
             # Run all the functions with names "bm_*" in the module.
             if attr.startswith("bm_"):
-                print(
-                    "Running benchmarks for " + module_name + "/" + attr + "..."
-                )
+                print("Running benchmarks for " + module_name + "/" + attr + "...")
                 getattr(module, attr)()
@@ -2,8 +2,8 @@
 from itertools import product
-from fvcore.common.benchmark import benchmark
+
+from fvcore.common.benchmark import benchmark
 from test_mesh_edge_loss import TestMeshEdgeLoss
@@ -17,8 +17,5 @@ def bm_mesh_edge_loss() -> None:
         n, v, f = case
         kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f})
     benchmark(
-        TestMeshEdgeLoss.mesh_edge_loss,
-        "MESH_EDGE_LOSS",
-        kwargs_list,
-        warmup_iters=1,
+        TestMeshEdgeLoss.mesh_edge_loss, "MESH_EDGE_LOSS", kwargs_list, warmup_iters=1
     )
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from fvcore.common.benchmark import benchmark
from test_obj_io import TestMeshObjIO
from test_ply_io import TestMeshPlyIO
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_mesh_laplacian_smoothing import TestLaplacianSmoothing
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_mesh_normal_consistency import TestMeshNormalConsistency
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_meshes import TestMeshes
@@ -20,9 +20,7 @@ def bm_compute_packed_padded_meshes() -> None:
     test_cases = product(num_meshes, max_v, max_f, devices)
     for case in test_cases:
         n, v, f, d = case
-        kwargs_list.append(
-            {"num_meshes": n, "max_v": v, "max_f": f, "device": d}
-        )
+        kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f, "device": d})
     benchmark(
         TestMeshes.compute_packed_with_init,
         "COMPUTE_PACKED",
......
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_nearest_neighbor_points import TestNearestNeighborPoints
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_packed_to_padded import TestPackedToPadded
@@ -23,13 +23,7 @@ def bm_packed_to_padded() -> None:
     for case in test_cases:
         n, v, f, d, b = case
         kwargs_list.append(
-            {
-                "num_meshes": n,
-                "num_verts": v,
-                "num_faces": f,
-                "num_d": d,
-                "device": b,
-            }
+            {"num_meshes": n, "num_verts": v, "num_faces": f, "num_d": d, "device": b}
         )
     benchmark(
         TestPackedToPadded.packed_to_padded_with_init,
......
@@ -2,8 +2,8 @@
 from itertools import product
-from fvcore.common.benchmark import benchmark
+
+from fvcore.common.benchmark import benchmark
 from test_pointclouds import TestPointclouds
......
@@ -2,11 +2,12 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
 from test_rasterize_meshes import TestRasterizeMeshes
 
 # ico levels:
 # 0: (12 verts, 20 faces)
 # 1: (42 verts, 80 faces)
@@ -39,12 +40,7 @@ def bm_rasterize_meshes() -> None:
     for case in test_cases:
         n, ic, im, b = case
         kwargs_list.append(
-            {
-                "num_meshes": n,
-                "ico_level": ic,
-                "image_size": im,
-                "blur_radius": b,
-            }
+            {"num_meshes": n, "ico_level": ic, "image_size": im, "blur_radius": b}
         )
     benchmark(
         TestRasterizeMeshes.rasterize_meshes_cpu_with_init,
@@ -63,9 +59,7 @@ def bm_rasterize_meshes() -> None:
     test_cases = product(num_meshes, ico_level, image_size, blur, bin_size)
     # only keep cases where bin_size == 0 or image_size / bin_size < 16
     test_cases = [
-        elem
-        for elem in test_cases
-        if (elem[-1] == 0 or elem[-3] / elem[-1] < 16)
+        elem for elem in test_cases if (elem[-1] == 0 or elem[-3] / elem[-1] < 16)
     ]
     for case in test_cases:
         n, ic, im, b, bn = case
......
@@ -3,7 +3,6 @@
 import torch
 from fvcore.common.benchmark import benchmark
-
 from pytorch3d.renderer.points.rasterize_points import (
     rasterize_points,
     rasterize_points_python,
@@ -40,9 +39,7 @@ def bm_python_vs_cpu() -> None:
         {"N": 1, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},
         {"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},
     ]
-    benchmark(
-        _bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1
-    )
+    benchmark(_bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1)
     benchmark(_bm_cpu_with_init, "RASTERIZE_CPU", kwargs_list, warmup_iters=1)
     kwargs_list = [
         {"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_sample_points_from_meshes import TestSamplePoints
......
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from fvcore.common.benchmark import benchmark
from test_so3 import TestSO3
......
@@ -2,8 +2,8 @@
 from itertools import product
-from fvcore.common.benchmark import benchmark
+
+from fvcore.common.benchmark import benchmark
 from test_subdivide_meshes import TestSubdivideMeshes
......
@@ -2,9 +2,9 @@
 from itertools import product
+
 import torch
 from fvcore.common.benchmark import benchmark
-
 from test_vert_align import TestVertAlign
@@ -25,8 +25,5 @@ def bm_vert_align() -> None:
     )
     benchmark(
-        TestVertAlign.vert_align_with_init,
-        "VERT_ALIGN",
-        kwargs_list,
-        warmup_iters=1,
+        TestVertAlign.vert_align_with_init, "VERT_ALIGN", kwargs_list, warmup_iters=1
     )
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-import numpy as np
 import unittest
+
+import numpy as np
 import torch
@@ -11,17 +12,13 @@ class TestCaseMixin(unittest.TestCase):
         """
         Verify that tensor1 and tensor2 have their data in distinct locations.
         """
-        self.assertNotEqual(
-            tensor1.storage().data_ptr(), tensor2.storage().data_ptr()
-        )
+        self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
 
     def assertNotSeparate(self, tensor1, tensor2) -> None:
         """
         Verify that tensor1 and tensor2 have their data in the same locations.
         """
-        self.assertEqual(
-            tensor1.storage().data_ptr(), tensor2.storage().data_ptr()
-        )
+        self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
 
     def assertAllSeparate(self, tensor_list) -> None:
         """
@@ -57,7 +54,5 @@ class TestCaseMixin(unittest.TestCase):
                 input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
             )
         else:
-            close = np.allclose(
-                input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
-            )
+            close = np.allclose(input, other, rtol=rtol, atol=atol, equal_nan=equal_nan)
         self.assertTrue(close)
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-import numpy as np
 import unittest
-import torch
+
+import numpy as np
+import torch
 from pytorch3d.renderer.blending import (
     BlendParams,
     hard_rgb_blend,
@@ -43,9 +43,7 @@ def sigmoid_blend_naive_loop(colors, fragments, blend_params):
     return pixel_colors
 
 
-def sigmoid_blend_naive_loop_backward(
-    grad_images, images, fragments, blend_params
-):
+def sigmoid_blend_naive_loop_backward(grad_images, images, fragments, blend_params):
     pix_to_face = fragments.pix_to_face
     dists = fragments.dists
     sigma = blend_params.sigma
@@ -135,14 +133,7 @@ class TestBlending(unittest.TestCase):
         torch.manual_seed(42)
 
     def _compare_impls(
-        self,
-        fn1,
-        fn2,
-        args1,
-        args2,
-        grad_var1=None,
-        grad_var2=None,
-        compare_grads=True,
+        self, fn1, fn2, args1, args2, grad_var1=None, grad_var2=None, compare_grads=True
     ):
         out1 = fn1(*args1)
@@ -160,9 +151,7 @@ class TestBlending(unittest.TestCase):
             (out2 * grad_out).sum().backward()
             self.assertTrue(hasattr(grad_var2, "grad"))
             self.assertTrue(
-                torch.allclose(
-                    grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5
-                )
+                torch.allclose(grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5)
             )
 
     def test_hard_rgb_blend(self):
@@ -199,9 +188,7 @@ class TestBlending(unittest.TestCase):
         # # (-) means inside triangle, (+) means outside triangle.
         random_sign_flip = torch.rand((N, S, S, K))
         random_sign_flip[random_sign_flip > 0.5] *= -1.0
-        dists = torch.randn(
-            size=(N, S, S, K), requires_grad=True, device=device
-        )
+        dists = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
         fragments = Fragments(
             pix_to_face=pix_to_face,
             bary_coords=empty,  # dummy
@@ -238,9 +225,7 @@ class TestBlending(unittest.TestCase):
         # # (-) means inside triangle, (+) means outside triangle.
         random_sign_flip = torch.rand((N, S, S, K))
         random_sign_flip[random_sign_flip > 0.5] *= -1.0
-        dists1 = torch.randn(
-            size=(N, S, S, K), requires_grad=True, device=device
-        )
+        dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
         dists2 = dists1.detach().clone()
         dists2.requires_grad = True
@@ -276,9 +261,7 @@ class TestBlending(unittest.TestCase):
         # of the image with surrounding padded values.
         N, S, K = 1, 8, 2
         device = torch.device("cuda")
-        pix_to_face = -torch.ones(
-            (N, S, S, K), dtype=torch.int64, device=device
-        )
+        pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64, device=device)
         h = int(S / 2)
         pix_to_face_full = torch.randint(
             size=(N, h, h, K), low=0, high=100, device=device
@@ -294,9 +277,7 @@ class TestBlending(unittest.TestCase):
         # randomly flip the sign of the distance
         # (-) means inside triangle, (+) means outside triangle.
-        dists1 = (
-            torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
-        )
+        dists1 = torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
         dists2 = dists1.clone()
         zbuf2 = zbuf1.clone()
         dists1.requires_grad = True
@@ -353,9 +334,7 @@ class TestBlending(unittest.TestCase):
         # # (-) means inside triangle, (+) means outside triangle.
         random_sign_flip = torch.rand((N, S, S, K), device=device)
         random_sign_flip[random_sign_flip > 0.5] *= -1.0
-        dists1 = torch.randn(
-            size=(N, S, S, K), requires_grad=True, device=device
-        )
+        dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
         fragments = Fragments(
             pix_to_face=pix_to_face,
             bary_coords=empty,  # dummy
@@ -398,15 +377,10 @@ class TestBlending(unittest.TestCase):
         # # (-) means inside triangle, (+) means outside triangle.
         random_sign_flip = torch.rand((N, S, S, K), device=device)
         random_sign_flip[random_sign_flip > 0.5] *= -1.0
-        dists1 = torch.randn(
-            size=(N, S, S, K), requires_grad=True, device=device
-        )
+        dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
         zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
         fragments = Fragments(
-            pix_to_face=pix_to_face,
-            bary_coords=empty,  # dummy
-            zbuf=zbuf,
-            dists=dists1,
+            pix_to_face=pix_to_face, bary_coords=empty, zbuf=zbuf, dists=dists1  # dummy
         )
         blend_params = BlendParams(sigma=1e-3)
......
@@ -3,6 +3,7 @@ import unittest
 from collections import Counter
 from pathlib import Path
 
+
 # This file groups together tests which look at the code without running it.
@@ -61,6 +62,5 @@ class TestBuild(unittest.TestCase):
                 if firstline.startswith(("# -*-", "#!")):
                     firstline = f.readline()
                 self.assertTrue(
-                    firstline.endswith(expect),
-                    f"{i} missing copyright header.",
+                    firstline.endswith(expect), f"{i} missing copyright header."
                 )
@@ -26,10 +26,11 @@
 # SOFTWARE.
 import math
-import numpy as np
 import unittest
-import torch
+
+import numpy as np
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.renderer.cameras import (
     OpenGLOrthographicCameras,
     OpenGLPerspectiveCameras,
@@ -43,8 +44,6 @@ from pytorch3d.renderer.cameras import (
 from pytorch3d.transforms import Transform3d
 from pytorch3d.transforms.so3 import so3_exponential_map
 
-from common_testing import TestCaseMixin
-
 
 # Naive function adapted from SoftRasterizer for test purposes.
 def perspective_project_naive(points, fov=60.0):
@@ -58,9 +57,7 @@ def perspective_project_naive(points, fov=60.0):
     coordinate (no z renormalization)
     """
     device = points.device
-    halfFov = torch.tensor(
-        (fov / 2) / 180 * np.pi, dtype=torch.float32, device=device
-    )
+    halfFov = torch.tensor((fov / 2) / 180 * np.pi, dtype=torch.float32, device=device)
     scale = torch.tan(halfFov[None])
     scale = scale[:, None]
     z = points[:, :, 2]
@@ -150,9 +147,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         dist = 2.7
         elev = 90.0
         azim = 0.0
-        expected_position = torch.tensor(
-            [0.0, 2.7, 0.0], dtype=torch.float32
-        ).view(1, 3)
+        expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32).view(
+            1, 3
+        )
         position = camera_position_from_spherical_angles(dist, elev, azim)
         self.assertClose(position, expected_position, atol=2e-7)
@@ -171,9 +168,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         dist = torch.tensor(2.7)
         elev = torch.tensor(0.0)
         azim = torch.tensor(90.0)
-        expected_position = torch.tensor(
-            [2.7, 0.0, 0.0], dtype=torch.float32
-        ).view(1, 3)
+        expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
+            1, 3
+        )
         position = camera_position_from_spherical_angles(dist, elev, azim)
         self.assertClose(position, expected_position, atol=2e-7)
@@ -181,9 +178,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         dist = 2.7
         elev = torch.tensor(0.0)
         azim = 90.0
-        expected_position = torch.tensor(
-            [2.7, 0.0, 0.0], dtype=torch.float32
-        ).view(1, 3)
+        expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
+            1, 3
+        )
         position = camera_position_from_spherical_angles(dist, elev, azim)
         self.assertClose(position, expected_position, atol=2e-7)
@@ -228,8 +225,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         elev = torch.tensor([0.0])
         azim = torch.tensor([90.0])
         expected_position = torch.tensor(
-            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
-            dtype=torch.float32,
+            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
         )
         position = camera_position_from_spherical_angles(dist, elev, azim)
         self.assertClose(position, expected_position, atol=3e-7)
@@ -239,8 +235,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         elev = 0.0
         azim = torch.tensor(90.0)
         expected_position = torch.tensor(
-            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
-            dtype=torch.float32,
+            [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
         )
         position = camera_position_from_spherical_angles(dist, elev, azim)
         self.assertClose(position, expected_position, atol=3e-7)
@@ -364,9 +359,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
         ):
             cam = cam_type(R=R, T=T)
             RT_class = cam.get_world_to_view_transform()
-            self.assertTrue(
-                torch.allclose(RT.get_matrix(), RT_class.get_matrix())
-            )
+            self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix()))
 
         self.assertTrue(isinstance(RT, Transform3d))
@@ -539,9 +532,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         # applying the scale puts the z coordinate at the far clipping plane
         # so the z is mapped to 1.0
         projected_verts = torch.tensor([2, 1, 1], dtype=torch.float32)
-        cameras = OpenGLOrthographicCameras(
-            znear=1.0, zfar=10.0, scale_xyz=scale
-        )
+        cameras = OpenGLOrthographicCameras(znear=1.0, zfar=10.0, scale_xyz=scale)
         P = cameras.get_projection_transform()
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices, scale)
@@ -578,9 +569,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         far = torch.tensor([10.0])
         near = 1.0
         scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
-        cameras = OpenGLOrthographicCameras(
-            znear=near, zfar=far, scale_xyz=scale
-        )
+        cameras = OpenGLOrthographicCameras(znear=near, zfar=far, scale_xyz=scale)
        P = cameras.get_projection_transform()
        vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
        vertices_batch = vertices[None, None, :]
@@ -683,15 +672,11 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         self.assertClose(v3[..., :2], v2[..., :2])
 
     def test_perspective_kwargs(self):
-        cameras = SfMPerspectiveCameras(
-            focal_length=5.0, principal_point=((2.5, 2.5),)
-        )
+        cameras = SfMPerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
         P = cameras.get_projection_transform(
             focal_length=2.0, principal_point=((2.5, 3.5),)
         )
         vertices = torch.randn([3, 4, 3], dtype=torch.float32)
         v1 = P.transform_points(vertices)
-        v2 = sfm_perspective_project_naive(
-            vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5
-        )
+        v2 = sfm_perspective_project_naive(vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5)
         self.assertClose(v1, v2)
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
+
 import torch
 import torch.nn.functional as F
-from pytorch3d.loss import chamfer_distance
 from common_testing import TestCaseMixin
+from pytorch3d.loss import chamfer_distance
 
 
 class TestChamfer(TestCaseMixin, unittest.TestCase):
@@ -19,14 +18,10 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
         """
         device = torch.device("cuda:0")
         p1 = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device)
-        p1_normals = torch.rand(
-            (batch_size, P1, 3), dtype=torch.float32, device=device
-        )
+        p1_normals = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device)
         p1_normals = p1_normals / p1_normals.norm(dim=2, p=2, keepdim=True)
         p2 = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device)
-        p2_normals = torch.rand(
-            (batch_size, P2, 3), dtype=torch.float32, device=device
-        )
+        p2_normals = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device)
         p2_normals = p2_normals / p2_normals.norm(dim=2, p=2, keepdim=True)
         weights = torch.rand((batch_size,), dtype=torch.float32, device=device)
@@ -47,9 +42,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
         for n in range(N):
             for i1 in range(P1):
                 for i2 in range(P2):
-                    dist[n, i1, i2] = torch.sum(
-                        (p1[n, i1, :] - p2[n, i2, :]) ** 2
-                    )
+                    dist[n, i1, i2] = torch.sum((p1[n, i1, :] - p2[n, i2, :]) ** 2)
 
         loss = [
             torch.min(dist, dim=2)[0],  # (N, P1)
@@ -146,11 +139,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
         # Error when point_reduction = "none" and batch_reduction = "none".
         with self.assertRaises(ValueError):
             chamfer_distance(
-                p1,
-                p2,
-                weights=weights,
-                batch_reduction="none",
-                point_reduction="none",
+                p1, p2, weights=weights, batch_reduction="none", point_reduction="none"
             )
 
         # Error when batch_reduction is not in ["none", "mean", "sum"].
@@ -339,9 +328,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
         loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
 
     @staticmethod
-    def chamfer_with_init(
-        batch_size: int, P1: int, P2: int, return_normals: bool
-    ):
+    def chamfer_with_init(batch_size: int, P1: int, P2: int, return_normals: bool):
         p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
             batch_size, P1, P2
         )
......
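
A note on the import hunks above: pairs of a removed import re-added beneath a new blank line (fvcore in the bm_* files, numpy/torch in the test files, TestCaseMixin in test_cameras.py) are isort regrouping imports into standard-library and third-party sections. A rough sketch of the same effect with isort's Python API (assuming isort 5, where `isort.code` is the entry point; section assignment depends on configuration, so the grouping shown is only indicative):

```python
# Illustrative sketch: isort separates the stdlib import from the rest,
# assuming unknown modules fall into the default THIRDPARTY section.
import isort

src = (
    "from itertools import product\n"
    "from fvcore.common.benchmark import benchmark\n"
    "from test_mesh_edge_loss import TestMeshEdgeLoss\n"
)

print(isort.code(src))
# from itertools import product
#
# from fvcore.common.benchmark import benchmark
# from test_mesh_edge_loss import TestMeshEdgeLoss
```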