Unverified Commit 29c59cab authored by Matthew Tancik's avatar Matthew Tancik Committed by GitHub
Browse files

add code checks and linting (#28)

* add code checks and linting

* Always clean docs folder
parent 1bcf42f1
name: Core Tests.
on:
push:
branches: [master]
pull_request:
branches: [master]
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.8.12
uses: actions/setup-python@v4
with:
python-version: "3.8.12"
- uses: actions/cache@v2
with:
path: ${{ env.pythonLocation }}
key: ${{ env.pythonLocation }}-${{ hashFiles('pyproject.toml') }}
- name: Install dependencies
run: |
pip install --upgrade --upgrade-strategy eager -e .[dev]
- name: Run isort
run: isort docs/ nerfacc/ scripts/ tests/ --profile black --check
- name: Run Black
run: black docs/ nerfacc/ scripts/ tests/ --check
- name: Python Pylint
run: |
pylint nerfacc tests scripts
"""nerfacc - A Python package for the fast volumetric rendering."""
from .occupancy_field import OccupancyField
from .utils import (
ray_aabb_intersect,
......
""" Occupancy field for accelerating volumetric rendering. """
from typing import Callable, List, Tuple, Union
import torch
......@@ -100,7 +101,9 @@ class OccupancyField(nn.Module):
self.register_buffer("occ_grid_binary", occ_grid_binary)
# Grid coords & indices
grid_coords = meshgrid3d(self.resolution).reshape(self.num_cells, self.num_dim)
grid_coords = meshgrid3d(self.resolution).reshape(
self.num_cells, self.num_dim
)
self.register_buffer("grid_coords", grid_coords)
grid_indices = torch.arange(self.num_cells)
self.register_buffer("grid_indices", grid_indices)
......@@ -145,12 +148,16 @@ class OccupancyField(nn.Module):
x = (
grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)
) / self.resolution_tensor
bb_min, bb_max = torch.split(self.aabb, [self.num_dim, self.num_dim], dim=0)
bb_min, bb_max = torch.split(
self.aabb, [self.num_dim, self.num_dim], dim=0
)
x = x * (bb_max - bb_min) + bb_min
occ = self.occ_eval_fn(x).squeeze(-1)
# ema update
self.occ_grid[indices] = torch.maximum(self.occ_grid[indices] * ema_decay, occ)
self.occ_grid[indices] = torch.maximum(
self.occ_grid[indices] * ema_decay, occ
)
# suppose to use scatter max but emperically it is almost the same.
# self.occ_grid, _ = scatter_max(
# occ, indices, dim=0, out=self.occ_grid * ema_decay
......@@ -174,7 +181,9 @@ class OccupancyField(nn.Module):
), "The samples are not drawn from a proper space!"
resolution = torch.tensor(self.resolution).to(self.occ_grid.device)
bb_min, bb_max = torch.split(self.aabb, [self.num_dim, self.num_dim], dim=0)
bb_min, bb_max = torch.split(
self.aabb, [self.num_dim, self.num_dim], dim=0
)
x = (x - bb_min) / (bb_max - bb_min)
selector = ((x > 0.0) & (x < 1.0)).all(dim=-1)
......@@ -193,7 +202,9 @@ class OccupancyField(nn.Module):
raise NotImplementedError("Currently only supports 2D or 3D field.")
occs = torch.zeros(x.shape[:-1], device=x.device)
occs[selector] = self.occ_grid[grid_indices[selector]]
occs_binary = torch.zeros(x.shape[:-1], device=x.device, dtype=torch.bool)
occs_binary = torch.zeros(
x.shape[:-1], device=x.device, dtype=torch.bool
)
occs_binary[selector] = self.occ_grid_binary[grid_indices[selector]]
return occs, occs_binary
......@@ -236,3 +247,14 @@ class OccupancyField(nn.Module):
ema_decay=ema_decay,
warmup_steps=warmup_steps,
)
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Query the occupancy field at the given sample locations.

    Args:
        x: Sample coordinates with shape (..., 2) or (..., 3).

    Returns:
        A tuple ``(occ, occ_binary)`` of float and binary occupancy
        values, each with shape (...).
    """
    occ, occ_binary = self.query_occ(x)
    return occ, occ_binary
from typing import List, Optional, Tuple
""" Volumetric rendering utilities. """
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
......@@ -92,7 +93,11 @@ def volumetric_marching(
if stratified:
t_min = t_min + torch.rand_like(t_min) * render_step_size
packed_info, frustum_starts, frustum_ends = nerfacc_cuda.volumetric_marching(
(
packed_info,
frustum_starts,
frustum_ends,
) = nerfacc_cuda.volumetric_marching(
# rays
rays_o.contiguous(),
rays_d.contiguous(),
......@@ -153,7 +158,10 @@ def volumetric_rendering_steps(
frustum_starts = frustum_starts.contiguous()
frustum_ends = frustum_ends.contiguous()
sigmas = sigmas.contiguous()
compact_packed_info, compact_selector = nerfacc_cuda.volumetric_rendering_steps(
(
compact_packed_info,
compact_selector,
) = nerfacc_cuda.volumetric_rendering_steps(
packed_info, frustum_starts, frustum_ends, sigmas
)
compact_frustum_starts = frustum_starts[compact_selector]
......@@ -202,7 +210,7 @@ def volumetric_rendering_weights(
frustum_starts = frustum_starts.contiguous()
frustum_ends = frustum_ends.contiguous()
sigmas = sigmas.contiguous()
weights = _volumetric_rendering_weights.apply(
weights = _VolumetricRenderingWeights.apply(
packed_info, frustum_starts, frustum_ends, sigmas
)
else:
......@@ -280,9 +288,11 @@ def unpack_to_ray_indices(packed_info: Tensor) -> Tensor:
return ray_indices
class _volumetric_rendering_weights(torch.autograd.Function):
class _VolumetricRenderingWeights(torch.autograd.Function):
@staticmethod
def forward(ctx, packed_info, frustum_starts, frustum_ends, sigmas):
def forward(
ctx, packed_info, frustum_starts, frustum_ends, sigmas
): # pylint: disable=arguments-differ
weights = nerfacc_cuda.volumetric_rendering_weights_forward(
packed_info, frustum_starts, frustum_ends, sigmas
)
......@@ -296,7 +306,7 @@ class _volumetric_rendering_weights(torch.autograd.Function):
return weights
@staticmethod
def backward(ctx, grad_weights):
def backward(ctx, grad_weights): # pylint: disable=arguments-differ
(
packed_info,
frustum_starts,
......@@ -313,3 +323,7 @@ class _volumetric_rendering_weights(torch.autograd.Function):
sigmas,
)
return None, None, None, grad_sigmas
@staticmethod
def jvp(ctx: Any, *grad_inputs: Any) -> Any:
    """Forward-mode autodiff is not supported for this custom op."""
    # Only the reverse-mode (VJP) path is implemented for this
    # autograd.Function; forward-mode differentiation is rejected.
    raise NotImplementedError("Not implemented.")
""" Full volumetric rendering pipeline. """
from typing import Callable, List, Optional, Tuple
import torch
......@@ -102,10 +103,10 @@ def volumetric_rendering_pipeline(
# Query sigma and color with gradients
rgbs, sigmas = rgb_sigma_fn(frustum_starts, frustum_ends, ray_indices)
assert rgbs.shape[-1] == 3, "rgbs must have 3 channels, got {}".format(rgbs.shape)
assert sigmas.shape[-1] == 1, "sigmas must have 1 channel, got {}".format(
sigmas.shape
)
assert rgbs.shape[-1] == 3, f"rgbs must have 3 channels, got {rgbs.shape}"
assert (
sigmas.shape[-1] == 1
), f"sigmas must have 1 channel, got {sigmas.shape}"
# Rendering: compute weights and ray indices.
weights = volumetric_rendering_weights(
......
......@@ -15,14 +15,55 @@ dependencies = [
"rich>=12"
]
[project.optional-dependencies]
[tool.setuptools.package-data]
"*" = ["*.cu", "*.cpp", "*.h"]
[project.optional-dependencies]
# for development
dev = [
"black",
"isort",
"pytest",
]
\ No newline at end of file
"pylint",
"pytest-xdist==2.5.0",
"pyyaml",
"typeguard>=2.13.3",
]
# black
[tool.black]
line-length = 80
# pylint
[tool.pylint.messages_control]
max-line-length = 80
generated-members = ["numpy.*", "torch.*", "cv2.*", "cv.*"]
good-names-rgxs = "^[_a-zA-Z][_a-z0-9]?$"
ignore-paths = ["^tests/.*$", "^nerfacc/cuda/.*$", "^tests/cuda/.*$"]
jobs = 0
disable = [
"line-too-long",
"too-many-arguments",
"too-many-locals",
]
#pytest
[tool.pytest.ini_options]
addopts = "-n=4 --typeguard-packages=nerfacc --disable-warnings --ignore=tests/cuda"
testpaths = [
"tests",
]
# pyright
[tool.pyright]
include = ["nerfacc"]
exclude = ["**/node_modules",
"**/__pycache__",
]
ignore = ["nerfacc/cuda"]
defineConstant = { DEBUG = true }
pythonVersion = "3.8"
pythonPlatform = "Linux"
\ No newline at end of file
#!/usr/bin/env python
"""Simple yaml debugger"""
import subprocess
import yaml
from rich.console import Console
from rich.style import Style
# Shared rich console for all status output; the fixed width keeps the
# rule separators consistent regardless of terminal size.
console = Console(width=120)
# Workflow step names (as written in the GitHub Actions yaml) that this
# script knows how to execute locally; any other step is skipped.
LOCAL_TESTS = [
    "Run license checks",
    "Run isort",
    "Run Black",
    "Python Pylint",
    "Test with pytest",
]
def run_command(command: str) -> bool:
    """Run a shell command and report whether it succeeded.

    Prints an error message to the shared console when the command exits
    with a non-zero status; it does not abort the script.

    Args:
        command: Shell command line to execute.

    Returns:
        True if the command exited with status 0, False otherwise.
    """
    # NOTE(review): shell=True runs trusted, locally-defined commands
    # parsed from the repo's own workflow file — do not feed it
    # untrusted input.
    ret_code = subprocess.call(command, shell=True)
    if ret_code != 0:
        console.print(f"[bold red]Error: `{command}` failed.")
    return ret_code == 0
def run_github_actions_file(filename: str):
    """Run the steps of a GitHub Actions workflow file locally.

    Loads the workflow yaml, executes every step listed in LOCAL_TESTS
    (with `--check` stripped so formatters fix files in place), then runs
    pytest and the docs build, and prints an overall pass/fail banner.

    Args:
        filename: Path to the workflow yaml file to run.
    """
    with open(filename, "rb") as f:
        my_dict = yaml.safe_load(f)
    steps = my_dict["jobs"]["build"]["steps"]
    success = True
    for step in steps:
        if "name" in step and step["name"] in LOCAL_TESTS:
            # Collapse the multi-line `run:` script into one shell line.
            compressed = step["run"].replace("\n", ";").replace("\\", "")
            compressed = compressed.replace("--check", "")
            curr_command = compressed
            console.line()
            console.rule(f"[bold green]Running: {curr_command}")
            # Run the command BEFORE and-ing with `success`: the previous
            # `success and run_command(...)` short-circuited after the
            # first failure and silently skipped all remaining checks.
            success = run_command(curr_command) and success
        else:
            skip_name = step["name"] if "name" in step else step["uses"]
            console.print(f"Skipping {skip_name}")
    # Code Testing
    console.line()
    console.rule("[bold green]Running pytest")
    success = run_command("pytest") and success
    # Add checks for building documentation
    console.line()
    console.rule("[bold green]Building Documentation")
    success = (
        run_command("cd docs/; make clean; make html SPHINXOPTS='-W;'")
        and success
    )
    if success:
        console.line()
        console.rule(characters="=")
        console.print(
            "[bold green]:TADA: :TADA: :TADA: ALL CHECKS PASSED :TADA: :TADA: :TADA:",
            justify="center",
        )
        console.rule(characters="=")
    else:
        console.line()
        console.rule(characters="=", style=Style(color="red"))
        console.print(
            "[bold red]:skull: :skull: :skull: ERRORS FOUND :skull: :skull: :skull:",
            justify="center",
        )
        console.rule(characters="=", style=Style(color="red"))


if __name__ == "__main__":
    run_github_actions_file(filename=".github/workflows/code_checks.yml")
......@@ -11,9 +11,9 @@ def sigma_fn(frustum_starts, frustum_ends, ray_indices):
def rgb_sigma_fn(frustum_starts, frustum_ends, ray_indices):
return torch.rand((frustum_ends.shape[0], 3), device=device), torch.rand_like(
frustum_ends
)
return torch.rand(
(frustum_ends.shape[0], 3), device=device
), torch.rand_like(frustum_ends)
def test_rendering():
......
......@@ -2,6 +2,7 @@ import torch
import tqdm
from nerfacc import (
unpack_to_ray_indices,
volumetric_marching,
volumetric_rendering_accumulate,
volumetric_rendering_steps,
......@@ -19,13 +20,7 @@ def test_rendering():
rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
for step in tqdm.tqdm(range(1000)):
(
packed_info,
frustum_origins,
frustum_dirs,
frustum_starts,
frustum_ends,
) = volumetric_marching(
(packed_info, frustum_starts, frustum_ends,) = volumetric_marching(
rays_o,
rays_d,
aabb=scene_aabb,
......@@ -39,26 +34,23 @@ def test_rendering():
packed_info,
frustum_starts,
frustum_ends,
frustum_origins,
frustum_dirs,
) = volumetric_rendering_steps(
packed_info,
sigmas,
frustum_starts,
frustum_ends,
frustum_origins,
frustum_dirs,
)
ray_indices = unpack_to_ray_indices(packed_info)
weights, ray_indices = volumetric_rendering_weights(
sigmas = torch.rand_like(frustum_ends[:, :1], requires_grad=True) * 100
values = torch.rand_like(frustum_starts, requires_grad=True)
weights = volumetric_rendering_weights(
packed_info,
sigmas,
frustum_starts,
frustum_ends,
)
values = torch.rand_like(sigmas, requires_grad=True)
accum_values = volumetric_rendering_accumulate(
weights,
ray_indices,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment