Unverified Commit d67b8c32 authored by Ruilong Li(李瑞龙)'s avatar Ruilong Li(李瑞龙) Committed by GitHub
Browse files

Reformatting and Update Tests (#46)

* more organized version

* add copyrights

* proper versioning

* proper versioning

* cleanup project; read version from pyproject

* read version from pyproject

* update weblink

* cleanup nerfacc file structure

* proper test contraction

* proper test for intersection

* proper tests for pack, grid, and intersection

* proper testing for rendering

* bug fix

* proper testing for marching

* run check reformat

* add hints on nvcc not found

* add doc to readme

* resume github check

* update readthedocs env

* rm tool.setuptools.packages.find
parent 300ec71a
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#pragma once #pragma once
#include <torch/extension.h> #include <torch/extension.h>
......
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
* Modified by Ruilong Li, 2022
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#include "include/helpers_cuda.h" #include "include/helpers_cuda.h"
template <typename scalar_t> template <typename scalar_t>
......
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#include "include/helpers_cuda.h"
// One thread per ray: expands per-ray packed info into a per-sample ray-index
// array, so that every sample produced by ray i stores the value i.
// packed_info is laid out as (n_rays, 2) int32 rows of {start, steps}.
__global__ void ray_indices_kernel(
    // input
    const int n_rays,       // number of rays; one thread handles one ray
    const int *packed_info, // (n_rays, 2) int32: {sample start index, sample count} per ray
    // output
    int *ray_indices)       // (n_samples,) int32: ray id written for every sample
{
    // NOTE(review): CUDA_GET_THREAD_ID is defined in helpers_cuda.h (not
    // visible here) — presumably it computes the flat thread index `i` and
    // early-returns when i >= n_rays; confirm against the header.
    CUDA_GET_THREAD_ID(i, n_rays);
    // locate
    const int base = packed_info[i * 2 + 0];  // point idx start.
    const int steps = packed_info[i * 2 + 1]; // point idx shift.
    if (steps == 0)
        return; // this ray produced no samples — nothing to write
    ray_indices += base;
    // Stamp this ray's id into each of its sample slots.
    for (int j = 0; j < steps; ++j)
    {
        ray_indices[j] = i;
    }
}
// Expand per-ray {start, steps} info into a per-sample ray-index tensor.
//
// Args:
//   packed_info: (n_rays, 2) int32 CUDA tensor; row i is {sample start, sample count}.
// Returns:
//   (n_samples,) int32 CUDA tensor where element k holds the ray index of sample k.
torch::Tensor unpack_to_ray_indices(const torch::Tensor packed_info)
{
    DEVICE_GUARD(packed_info);
    CHECK_INPUT(packed_info);
    const int n_rays = packed_info.size(0);

    // Edge case: no rays. The general path below would index
    // packed_info[-1] on an empty tensor and launch a zero-block grid,
    // both of which are invalid — return an empty result instead.
    if (n_rays == 0)
        return torch::zeros({0}, packed_info.options().dtype(torch::kInt32));

    const int threads = 256;
    const int blocks = CUDA_N_BLOCKS_NEEDED(n_rays, threads);

    // Total sample count = start + steps of the last ray.
    // (.item() forces a device->host sync here.)
    int n_samples = packed_info[n_rays - 1].sum(0).item<int>();
    torch::Tensor ray_indices = torch::zeros(
        {n_samples}, packed_info.options().dtype(torch::kInt32));

    ray_indices_kernel<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
        n_rays,
        packed_info.data_ptr<int>(),
        ray_indices.data_ptr<int>());
    return ray_indices;
}
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#include "include/helpers_cuda.h" #include "include/helpers_cuda.h"
#include "include/helpers_math.h" #include "include/helpers_math.h"
#include "include/helpers_contraction.h" #include "include/helpers_contraction.h"
......
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#include "include/helpers_cuda.h" #include "include/helpers_cuda.h"
#include "include/helpers_math.h" #include "include/helpers_math.h"
#include "include/helpers_contraction.h" #include "include/helpers_contraction.h"
...@@ -275,53 +279,6 @@ std::vector<torch::Tensor> ray_marching( ...@@ -275,53 +279,6 @@ std::vector<torch::Tensor> ray_marching(
return {packed_info, t_starts, t_ends}; return {packed_info, t_starts, t_ends};
} }
// -----------------------------------------------------------------------------
// Ray index for each sample
// -----------------------------------------------------------------------------
// One thread per ray: writes the ray's index into every sample slot it owns.
// packed_info is laid out as (n_rays, 2) int32 rows of {start, steps}.
__global__ void ray_indices_kernel(
    // input
    const int n_rays,       // number of rays; one thread handles one ray
    const int *packed_info, // (n_rays, 2) int32: {sample start index, sample count} per ray
    // output
    int *ray_indices)       // (n_samples,) int32: ray id written for every sample
{
    // NOTE(review): CUDA_GET_THREAD_ID comes from helpers_cuda.h (not visible
    // here) — presumably computes `i` and early-returns when i >= n_rays.
    CUDA_GET_THREAD_ID(i, n_rays);
    // locate
    const int base = packed_info[i * 2 + 0];  // point idx start.
    const int steps = packed_info[i * 2 + 1]; // point idx shift.
    if (steps == 0)
        return; // this ray produced no samples
    ray_indices += base;
    // Stamp this ray's id into each of its sample slots.
    for (int j = 0; j < steps; ++j)
    {
        ray_indices[j] = i;
    }
}
// Expand per-ray {start, steps} info into a per-sample ray-index tensor.
//
// Args:
//   packed_info: (n_rays, 2) int32 CUDA tensor; row i is {sample start, sample count}.
// Returns:
//   (n_samples,) int32 CUDA tensor where element k holds the ray index of sample k.
torch::Tensor unpack_to_ray_indices(const torch::Tensor packed_info)
{
    DEVICE_GUARD(packed_info);
    CHECK_INPUT(packed_info);
    const int n_rays = packed_info.size(0);

    // Edge case: no rays. The general path below would index
    // packed_info[-1] on an empty tensor and launch a zero-block grid,
    // both of which are invalid — return an empty result instead.
    if (n_rays == 0)
        return torch::zeros({0}, packed_info.options().dtype(torch::kInt32));

    const int threads = 256;
    const int blocks = CUDA_N_BLOCKS_NEEDED(n_rays, threads);

    // Total sample count = start + steps of the last ray.
    // (.item() forces a device->host sync here.)
    int n_samples = packed_info[n_rays - 1].sum(0).item<int>();
    torch::Tensor ray_indices = torch::zeros(
        {n_samples}, packed_info.options().dtype(torch::kInt32));

    ray_indices_kernel<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
        n_rays,
        packed_info.data_ptr<int>(),
        ray_indices.data_ptr<int>());
    return ray_indices;
}
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Query the occupancy grid // Query the occupancy grid
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
......
/*
* Copyright (c) 2022 Ruilong Li, UC Berkeley.
*/
#include "include/helpers_cuda.h" #include "include/helpers_cuda.h"
template <typename scalar_t> template <typename scalar_t>
......
"""
Copyright (c) 2022 Ruilong Li @ UC Berkeley
"""
from typing import Callable, List, Union from typing import Callable, List, Union
import torch import torch
......
"""
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
from typing import Tuple
import torch
from torch import Tensor
import nerfacc.cuda as _C
@torch.no_grad()
def ray_aabb_intersect(
    rays_o: Tensor, rays_d: Tensor, aabb: Tensor
) -> Tuple[Tensor, Tensor]:
    """Intersect rays against an axis-aligned bounding box (AABB).

    Note:
        Not differentiable with respect to any input.

    Args:
        rays_o: Ray origins, shape (n_rays, 3).
        rays_d: Normalized ray directions, shape (n_rays, 3).
        aabb: Scene bounds {xmin, ymin, zmin, xmax, ymax, zmax}, shape (6,).

    Returns:
        Entry/exit distances {t_min, t_max}, each with shape (n_rays,).
        t_min is clipped to a minimum of zero; a value of 1e10 marks
        "no intersection".

    Raises:
        NotImplementedError: if any input tensor is not on a CUDA device.

    Examples:

    .. code-block:: python

        aabb = torch.tensor([0.0, 0.0, 0.0, 1.0, 1.0, 1.0], device="cuda:0")
        rays_o = torch.rand((128, 3), device="cuda:0")
        rays_d = torch.randn((128, 3), device="cuda:0")
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
        t_min, t_max = ray_aabb_intersect(rays_o, rays_d, aabb)
    """
    # Guard clause: the intersection kernel only exists for CUDA tensors.
    on_gpu = rays_o.is_cuda and rays_d.is_cuda and aabb.is_cuda
    if not on_gpu:
        raise NotImplementedError("Only support cuda inputs.")
    # The CUDA op requires contiguous memory layouts.
    t_min, t_max = _C.ray_aabb_intersect(
        rays_o.contiguous(), rays_d.contiguous(), aabb.contiguous()
    )
    return t_min, t_max
"""
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
import torch
from torch import Tensor
import nerfacc.cuda as _C
@torch.no_grad()
def unpack_to_ray_indices(packed_info: Tensor) -> Tensor:
    """Expand per-ray `packed_info` into a per-sample ray-index tensor.

    Useful for converting per-ray data to per-sample data.

    Note:
        Not differentiable with respect to any input.

    Args:
        packed_info: Which samples belong to which ray; see
            :func:`nerfacc.ray_marching` for details. Shape (n_rays, 2).

    Returns:
        LongTensor of shape (n_sample,) holding the ray index of each sample.

    Raises:
        NotImplementedError: if `packed_info` is not on a CUDA device.

    Examples:

    .. code-block:: python

        rays_o = torch.rand((128, 3), device="cuda:0")
        rays_d = torch.randn((128, 3), device="cuda:0")
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
        # Ray marching with near far plane.
        packed_info, t_starts, t_ends = ray_marching(
            rays_o, rays_d, near_plane=0.1, far_plane=1.0, render_step_size=1e-3
        )
        # torch.Size([128, 2]) torch.Size([115200, 1]) torch.Size([115200, 1])
        print(packed_info.shape, t_starts.shape, t_ends.shape)
        # Unpack per-ray info to per-sample info.
        ray_indices = unpack_to_ray_indices(packed_info)
        # torch.Size([115200]) torch.int64
        print(ray_indices.shape, ray_indices.dtype)
    """
    # Guard clause: the unpack kernel only exists for CUDA tensors.
    if not packed_info.is_cuda:
        raise NotImplementedError("Only support cuda inputs.")
    # The CUDA op reads a contiguous int32 buffer; convert explicitly.
    flat_indices = _C.unpack_to_ray_indices(packed_info.contiguous().int())
    return flat_indices.long()
from typing import Callable, Optional, Tuple
import torch
from .ray_marching import unpack_to_ray_indices
from .vol_rendering import accumulate_along_rays, render_weight_from_density
def rendering(
    # radiance field
    rgb_sigma_fn: Callable,
    # ray marching results
    packed_info: torch.Tensor,
    t_starts: torch.Tensor,
    t_ends: torch.Tensor,
    # rendering options
    early_stop_eps: float = 1e-4,
    alpha_thre: float = 1e-2,
    render_bkgd: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Render the rays through the radiance field defined by `rgb_sigma_fn`.

    This function is differentiable to the outputs of `rgb_sigma_fn` so it can be used for
    gradient-based optimization.

    Warning:
        This function is not differentiable to `t_starts`, `t_ends`.

    Args:
        rgb_sigma_fn: A function that takes in samples {t_starts (N, 1), t_ends (N, 1), \
            ray indices (N,)} and returns the post-activation rgb (N, 3) and density \
            values (N, 1).
        packed_info: Packed ray marching info. See :func:`ray_marching` for details.
        t_starts: Per-sample start distance. Tensor with shape (n_samples, 1).
        t_ends: Per-sample end distance. Tensor with shape (n_samples, 1).
        early_stop_eps: Early stop threshold during transmittance accumulation. Default: 1e-4.
        alpha_thre: Alpha threshold for skipping empty space. Default: 1e-2.
        render_bkgd: Optional. Background color. Tensor with shape (3,).

    Returns:
        Ray colors (n_rays, 3), opacities (n_rays, 1) and depths (n_rays, 1).

    Examples:

    .. code-block:: python

        import torch
        from nerfacc import OccupancyGrid, ray_marching, rendering

        device = "cuda:0"
        batch_size = 128
        rays_o = torch.rand((batch_size, 3), device=device)
        rays_d = torch.randn((batch_size, 3), device=device)
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)

        # Ray marching.
        packed_info, t_starts, t_ends = ray_marching(
            rays_o, rays_d, near_plane=0.1, far_plane=1.0, render_step_size=1e-3
        )

        # Rendering.
        def rgb_sigma_fn(t_starts, t_ends, ray_indices):
            # This is a dummy function that returns random values.
            rgbs = torch.rand((t_starts.shape[0], 3), device=device)
            sigmas = torch.rand((t_starts.shape[0], 1), device=device)
            return rgbs, sigmas
        colors, opacities, depths = rendering(rgb_sigma_fn, packed_info, t_starts, t_ends)

        # torch.Size([128, 3]) torch.Size([128, 1]) torch.Size([128, 1])
        print(colors.shape, opacities.shape, depths.shape)
    """
    n_rays = packed_info.shape[0]
    # Map every sample back to the ray it came from (per-ray -> per-sample).
    ray_indices = unpack_to_ray_indices(packed_info)

    # Query sigma and color with gradients
    rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)
    assert rgbs.shape[-1] == 3, "rgbs must have 3 channels, got {}".format(
        rgbs.shape
    )
    assert (
        sigmas.shape == t_starts.shape
    ), "sigmas must have shape of (N, 1)! Got {}".format(sigmas.shape)

    # Rendering: compute weights and ray indices.
    weights = render_weight_from_density(
        packed_info, t_starts, t_ends, sigmas, early_stop_eps, alpha_thre
    )

    # Rendering: accumulate rgbs, opacities, and depths along the rays.
    colors = accumulate_along_rays(
        weights, ray_indices, values=rgbs, n_rays=n_rays
    )
    opacities = accumulate_along_rays(
        weights, ray_indices, values=None, n_rays=n_rays
    )
    depths = accumulate_along_rays(
        weights,
        ray_indices,
        values=(t_starts + t_ends) / 2.0,
        n_rays=n_rays,
    )

    # Background composition.
    if render_bkgd is not None:
        colors = colors + render_bkgd * (1.0 - opacities)

    return colors, opacities, depths
from typing import Callable, Optional, Tuple from typing import Callable, Optional, Tuple
import torch import torch
from torch import Tensor
import nerfacc.cuda as _C import nerfacc.cuda as _C
from nerfacc.contraction import ContractionType
from .contraction import ContractionType
from .grid import Grid from .grid import Grid
from .intersection import ray_aabb_intersect
from .pack import unpack_to_ray_indices
from .vol_rendering import render_visibility from .vol_rendering import render_visibility
@torch.no_grad()
def ray_aabb_intersect(
    rays_o: Tensor, rays_d: Tensor, aabb: Tensor
) -> Tuple[Tensor, Tensor]:
    """Compute ray / axis-aligned-bounding-box intersection distances.

    Note:
        Not differentiable with respect to any input.

    Args:
        rays_o: Ray origins, shape (n_rays, 3).
        rays_d: Normalized ray directions, shape (n_rays, 3).
        aabb: Scene bounds {xmin, ymin, zmin, xmax, ymax, zmax}, shape (6,).

    Returns:
        Entry/exit distances {t_min, t_max}, each with shape (n_rays,).
        t_min is clipped to a minimum of zero; 1e10 marks "no intersection".

    Raises:
        NotImplementedError: if any input tensor is not on a CUDA device.

    Examples:

    .. code-block:: python

        aabb = torch.tensor([0.0, 0.0, 0.0, 1.0, 1.0, 1.0], device="cuda:0")
        rays_o = torch.rand((128, 3), device="cuda:0")
        rays_d = torch.randn((128, 3), device="cuda:0")
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
        t_min, t_max = ray_aabb_intersect(rays_o, rays_d, aabb)
    """
    # Only the CUDA implementation exists; reject other devices up front.
    all_cuda = rays_o.is_cuda and rays_d.is_cuda and aabb.is_cuda
    if not all_cuda:
        raise NotImplementedError("Only support cuda inputs.")
    # Make layouts contiguous before handing the buffers to the CUDA op.
    t_min, t_max = _C.ray_aabb_intersect(
        rays_o.contiguous(), rays_d.contiguous(), aabb.contiguous()
    )
    return t_min, t_max
@torch.no_grad()
def unpack_to_ray_indices(packed_info: Tensor) -> Tensor:
    """Unpack `packed_info` to `ray_indices`. Useful for converting per ray data to per sample data.

    Note:
        this function is not differentiable to any inputs.

    Args:
        packed_info: Stores information on which samples belong to the same ray. \
            See :func:`nerfacc.ray_marching` for details. Tensor with shape (n_rays, 2).

    Returns:
        Ray index of each sample. LongTensor with shape (n_sample).

    Raises:
        NotImplementedError: if `packed_info` is not on a CUDA device.

    Examples:

    .. code-block:: python

        rays_o = torch.rand((128, 3), device="cuda:0")
        rays_d = torch.randn((128, 3), device="cuda:0")
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
        # Ray marching with near far plane.
        packed_info, t_starts, t_ends = ray_marching(
            rays_o, rays_d, near_plane=0.1, far_plane=1.0, render_step_size=1e-3
        )
        # torch.Size([128, 2]) torch.Size([115200, 1]) torch.Size([115200, 1])
        print(packed_info.shape, t_starts.shape, t_ends.shape)
        # Unpack per-ray info to per-sample info.
        ray_indices = unpack_to_ray_indices(packed_info)
        # torch.Size([115200]) torch.int64
        print(ray_indices.shape, ray_indices.dtype)
    """
    if packed_info.is_cuda:
        # Bug fix: cast to int32 explicitly. The CUDA kernel reads the buffer
        # through data_ptr<int>, so a packed_info of any other dtype would be
        # silently misinterpreted. (Matches the sibling implementation that
        # already applies .int() before the call.)
        ray_indices = _C.unpack_to_ray_indices(packed_info.contiguous().int())
    else:
        raise NotImplementedError("Only support cuda inputs.")
    return ray_indices.long()
@torch.no_grad() @torch.no_grad()
def ray_marching( def ray_marching(
# rays # rays
rays_o: Tensor, rays_o: torch.Tensor,
rays_d: Tensor, rays_d: torch.Tensor,
t_min: Optional[Tensor] = None, t_min: Optional[torch.Tensor] = None,
t_max: Optional[Tensor] = None, t_max: Optional[torch.Tensor] = None,
# bounding box of the scene # bounding box of the scene
scene_aabb: Optional[Tensor] = None, scene_aabb: Optional[torch.Tensor] = None,
# binarized grid for skipping empty space # binarized grid for skipping empty space
grid: Optional[Grid] = None, grid: Optional[Grid] = None,
# sigma function for skipping invisible space # sigma function for skipping invisible space
......
"""
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
from importlib.metadata import version
__version__ = version("nerfacc")
from typing import Optional, Tuple """
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
from typing import Callable, Optional, Tuple
import torch import torch
from torch import Tensor from torch import Tensor
import nerfacc.cuda as _C import nerfacc.cuda as _C
from .pack import unpack_to_ray_indices
def rendering(
    # radiance field
    rgb_sigma_fn: Callable,
    # ray marching results
    packed_info: torch.Tensor,
    t_starts: torch.Tensor,
    t_ends: torch.Tensor,
    # rendering options
    early_stop_eps: float = 1e-4,
    alpha_thre: float = 1e-2,
    render_bkgd: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Render the rays through the radiance field defined by `rgb_sigma_fn`.

    This function is differentiable to the outputs of `rgb_sigma_fn` so it can be used for
    gradient-based optimization.

    Warning:
        This function is not differentiable to `t_starts`, `t_ends`.

    Args:
        rgb_sigma_fn: A function that takes in samples {t_starts (N, 1), t_ends (N, 1), \
            ray indices (N,)} and returns the post-activation rgb (N, 3) and density \
            values (N, 1).
        packed_info: Packed ray marching info. See :func:`ray_marching` for details.
        t_starts: Per-sample start distance. Tensor with shape (n_samples, 1).
        t_ends: Per-sample end distance. Tensor with shape (n_samples, 1).
        early_stop_eps: Early stop threshold during transmittance accumulation. Default: 1e-4.
        alpha_thre: Alpha threshold for skipping empty space. Default: 1e-2.
        render_bkgd: Optional. Background color. Tensor with shape (3,).

    Returns:
        Ray colors (n_rays, 3), opacities (n_rays, 1) and depths (n_rays, 1).

    Examples:

    .. code-block:: python

        import torch
        from nerfacc import OccupancyGrid, ray_marching, rendering

        device = "cuda:0"
        batch_size = 128
        rays_o = torch.rand((batch_size, 3), device=device)
        rays_d = torch.randn((batch_size, 3), device=device)
        rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)

        # Ray marching.
        packed_info, t_starts, t_ends = ray_marching(
            rays_o, rays_d, near_plane=0.1, far_plane=1.0, render_step_size=1e-3
        )

        # Rendering.
        def rgb_sigma_fn(t_starts, t_ends, ray_indices):
            # This is a dummy function that returns random values.
            rgbs = torch.rand((t_starts.shape[0], 3), device=device)
            sigmas = torch.rand((t_starts.shape[0], 1), device=device)
            return rgbs, sigmas
        colors, opacities, depths = rendering(rgb_sigma_fn, packed_info, t_starts, t_ends)

        # torch.Size([128, 3]) torch.Size([128, 1]) torch.Size([128, 1])
        print(colors.shape, opacities.shape, depths.shape)
    """
    n_rays = packed_info.shape[0]
    # Map every sample back to the ray it came from (per-ray -> per-sample).
    ray_indices = unpack_to_ray_indices(packed_info)

    # Query sigma and color with gradients
    rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)
    assert rgbs.shape[-1] == 3, "rgbs must have 3 channels, got {}".format(
        rgbs.shape
    )
    assert (
        sigmas.shape == t_starts.shape
    ), "sigmas must have shape of (N, 1)! Got {}".format(sigmas.shape)

    # Rendering: compute weights and ray indices.
    weights = render_weight_from_density(
        packed_info, t_starts, t_ends, sigmas, early_stop_eps, alpha_thre
    )

    # Rendering: accumulate rgbs, opacities, and depths along the rays.
    colors = accumulate_along_rays(
        weights, ray_indices, values=rgbs, n_rays=n_rays
    )
    opacities = accumulate_along_rays(
        weights, ray_indices, values=None, n_rays=n_rays
    )
    depths = accumulate_along_rays(
        weights,
        ray_indices,
        values=(t_starts + t_ends) / 2.0,
        n_rays=n_rays,
    )

    # Background composition.
    if render_bkgd is not None:
        colors = colors + render_bkgd * (1.0 - opacities)

    return colors, opacities, depths
def accumulate_along_rays( def accumulate_along_rays(
weights: Tensor, weights: Tensor,
...@@ -135,7 +243,7 @@ def render_weight_from_alpha( ...@@ -135,7 +243,7 @@ def render_weight_from_alpha(
alphas, alphas,
early_stop_eps: float = 1e-4, early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0, alpha_thre: float = 0.0,
) -> Tuple[torch.Tensor, ...]: ) -> torch.Tensor:
"""Compute transmittance weights from density. """Compute transmittance weights from density.
Args: Args:
......
...@@ -4,34 +4,62 @@ build-backend = "setuptools.build_meta" ...@@ -4,34 +4,62 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "nerfacc" name = "nerfacc"
version = "0.1.4-1" version = "0.1.5"
description = "A General NeRF Acceleration Toolbox."
readme = "README.md"
authors = [{name = "Ruilong", email = "ruilongli94@gmail.com"}] authors = [{name = "Ruilong", email = "ruilongli94@gmail.com"}]
license = { text="MIT" } license = { text="MIT" }
requires-python = ">=3.8" requires-python = ">=3.8"
dependencies = [ dependencies = [
"ninja>=1.10.2.3", "ninja>=1.10.2.3",
"pybind11>=2.10.0", "pybind11>=2.10.0",
"torch>=1.12.1", "torch>=1.12.0",
"rich>=12" "rich>=12"
] ]
# [options]
# equivalent to using --extra-index-url with pip,
# which is needed for specifying the CUDA version for torch
# dependency_links = [
# "https://download.pytorch.org/whl/cu116"
# ]
[tool.setuptools.package-data] [tool.setuptools.package-data]
"*" = ["*.cu", "*.cpp", "*.h"] "*" = ["*.cu", "*.cpp", "*.h"]
[project.optional-dependencies] [project.urls]
"Documentation" = "https://www.nerfacc.com/en/latest/"
# for development [project.optional-dependencies]
# Development packages
dev = [ dev = [
"black", "black[jupyter]==22.3.0",
"isort", "isort==5.10.1",
"pytest", "pylint==2.13.4",
"pylint", "pytest==7.1.2",
"pytest-xdist==2.5.0", "pytest-xdist==2.5.0",
"pyyaml",
"typeguard>=2.13.3", "typeguard>=2.13.3",
"pyyaml==6.0",
]
# Documentation packages
docs = [
"pytorch_sphinx_theme @ git+https://github.com/liruilong940607/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme",
"sphinx==5.2.1",
"sphinx-copybutton==0.5.0",
"sphinx-design==0.2.0"
] ]
# Example packages
examples = [
"tinycudann @ git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch",
"imageio",
"opencv-python",
"numpy",
"tqdm"
]
# [tool.setuptools.packages.find]
# include = ["nerfacc", "scripts"]
# black
[tool.black] [tool.black]
line-length = 80 line-length = 80
...@@ -40,28 +68,29 @@ multi_line_output = 3 ...@@ -40,28 +68,29 @@ multi_line_output = 3
line_length = 80 line_length = 80
include_trailing_comma = true include_trailing_comma = true
# pylint
[tool.pylint.messages_control] [tool.pylint.messages_control]
max-line-length = 80 max-line-length = 80
generated-members = ["numpy.*", "torch.*", "cv2.*", "cv.*"] generated-members = ["numpy.*", "torch.*", "cv2.*", "cv.*"]
good-names-rgxs = "^[_a-zA-Z][_a-z0-9]?$" good-names-rgxs = "^[_a-zA-Z][_a-z0-9]?$"
ignore-paths = ["^tests/.*$", "^nerfacc/cuda/.*$", "^tests/cuda/.*$"] ignore-paths = ["^tests/.*$", "examples/pycolmap"]
jobs = 0 jobs = 0
disable = [ disable = [
"line-too-long", "duplicate-code",
"fixme",
"logging-fstring-interpolation",
"too-many-arguments", "too-many-arguments",
"too-many-branches",
"too-many-instance-attributes",
"too-many-locals", "too-many-locals",
"unnecessary-ellipsis",
] ]
#pytest
[tool.pytest.ini_options] [tool.pytest.ini_options]
addopts = "-n=4 --typeguard-packages=nerfacc --disable-warnings --ignore=tests/cuda" addopts = "-n=4 --typeguard-packages=nerfacc --disable-warnings"
testpaths = [ testpaths = [
"tests", "tests",
] ]
# pyright
[tool.pyright] [tool.pyright]
include = ["nerfacc"] include = ["nerfacc"]
exclude = ["**/node_modules", exclude = ["**/node_modules",
...@@ -70,5 +99,5 @@ exclude = ["**/node_modules", ...@@ -70,5 +99,5 @@ exclude = ["**/node_modules",
ignore = ["nerfacc/cuda"] ignore = ["nerfacc/cuda"]
defineConstant = { DEBUG = true } defineConstant = { DEBUG = true }
pythonVersion = "3.8" pythonVersion = "3.9"
pythonPlatform = "Linux" pythonPlatform = "Linux"
\ No newline at end of file
import pytest import pytest
import torch import torch
from nerfacc.contraction import ContractionType, contract, contract_inv import nerfacc.cuda as _C
from nerfacc import ContractionType, contract, contract_inv
device = "cuda:0" device = "cuda:0"
batch_size = 32
eps = 1e-6
# Bug fix: `torch.cuda.is_available` (without parens) is a function object and
# therefore always truthy, so `not torch.cuda.is_available` was always False
# and the skip never fired on CUDA-less machines. Call the function instead.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device")
def test_ContractionType():
    """Each Python ContractionType must map to the matching C++ enum value."""
    ctype = ContractionType.AABB.to_cpp_version()
    assert ctype == _C.ContractionTypeGetter(0)
    ctype = ContractionType.UN_BOUNDED_TANH.to_cpp_version()
    assert ctype == _C.ContractionTypeGetter(1)
    ctype = ContractionType.UN_BOUNDED_SPHERE.to_cpp_version()
    assert ctype == _C.ContractionTypeGetter(2)
@pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device") @pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device")
def test_identity(): def test_identity():
samples = torch.rand([128, 3], device=device) x = torch.rand([batch_size, 3], device=device)
roi = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.float32, device=device) roi = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.float32, device=device)
samples_out = contract(samples, roi=roi) x_out = contract(x, roi=roi, type=ContractionType.AABB)
assert torch.allclose(samples_out, samples) assert torch.allclose(x_out, x, atol=eps)
samples_inv = contract(samples_out, roi=roi) x_inv = contract_inv(x_out, roi=roi, type=ContractionType.AABB)
assert torch.allclose(samples_inv, samples) assert torch.allclose(x_inv, x, atol=eps)
@pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device") @pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device")
def test_normalization(): def test_aabb():
samples = torch.rand([128, 3], device=device) x = torch.rand([batch_size, 3], device=device)
roi = torch.tensor( roi = torch.tensor(
[-1, -1, -1, 1, 1, 1], dtype=torch.float32, device=device [-1, -1, -1, 1, 1, 1], dtype=torch.float32, device=device
) )
samples_out = contract(samples, roi=roi) x_out = contract(x, roi=roi, type=ContractionType.AABB)
assert torch.allclose(samples_out, samples * 0.5 + 0.5) x_out_tgt = x * 0.5 + 0.5
samples_inv = contract_inv(samples_out, roi=roi) assert torch.allclose(x_out, x_out_tgt, atol=eps)
assert torch.allclose(samples_inv, samples, atol=1e-6) x_inv = contract_inv(x_out, roi=roi, type=ContractionType.AABB)
assert torch.allclose(x_inv, x, atol=eps)
# Bug fix: `torch.cuda.is_available` (without parens) is always truthy, so the
# skip condition was always False and the test crashed on CUDA-less machines.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device")
def test_tanh():
    """Round-trip the unbounded tanh contraction against a closed-form target."""
    x = torch.randn([batch_size, 3], device=device)
    roi = torch.tensor(
        [-0.2, -0.3, -0.4, 0.7, 0.8, 0.6], dtype=torch.float32, device=device
    )
    x_out = contract(x, roi=roi, type=ContractionType.UN_BOUNDED_TANH)
    # Expected mapping: normalize into the roi, center at 0, tanh, rescale to [0, 1].
    x_out_tgt = (
        torch.tanh((x - roi[:3]) / (roi[3:] - roi[:3]) - 0.5) * 0.5 + 0.5
    )
    assert torch.allclose(x_out, x_out_tgt, atol=eps)
    x_inv = contract_inv(x_out, roi=roi, type=ContractionType.UN_BOUNDED_TANH)
    assert torch.allclose(x_inv, x, atol=eps)
@pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device") @pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device")
def test_contract(): def test_sphere():
x = torch.rand([128, 3], device=device) x = torch.randn([batch_size, 3], device=device)
roi = torch.tensor( roi = torch.tensor(
[0.2, 0.3, 0.4, 0.7, 0.8, 0.6], dtype=torch.float32, device=device [-0.2, -0.3, -0.4, 0.7, 0.8, 0.6], dtype=torch.float32, device=device
) )
for type in [ x_out = contract(x, roi=roi, type=ContractionType.UN_BOUNDED_SPHERE)
ContractionType.UN_BOUNDED_SPHERE, assert ((x_out - 0.5).norm(dim=-1) < 0.5).all()
ContractionType.UN_BOUNDED_TANH, x_inv = contract_inv(x_out, roi=roi, type=ContractionType.UN_BOUNDED_SPHERE)
]: assert torch.allclose(x_inv, x, atol=eps)
x_unit = contract(x, roi=roi, type=type)
assert x_unit.max() <= 1 and x_unit.min() >= 0
x_inv = contract_inv(x_unit, roi=roi, type=type)
assert torch.allclose(x_inv, x, atol=1e-3)
if __name__ == "__main__": if __name__ == "__main__":
test_ContractionType()
test_identity() test_identity()
test_normalization() test_aabb()
test_contract() test_tanh()
test_sphere()
import pytest import pytest
import torch import torch
from nerfacc.contraction import ContractionType from nerfacc import OccupancyGrid
from nerfacc.grid import OccupancyGrid
device = "cuda:0" device = "cuda:0"
...@@ -15,9 +14,8 @@ def occ_eval_fn(x: torch.Tensor) -> torch.Tensor: ...@@ -15,9 +14,8 @@ def occ_eval_fn(x: torch.Tensor) -> torch.Tensor:
@pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device") @pytest.mark.skipif(not torch.cuda.is_available, reason="No CUDA device")
def test_occ_grid(): def test_occ_grid():
occ_grid = OccupancyGrid(roi_aabb=[0, 0, 0, 1, 1, 1], resolution=128).to( roi_aabb = [0, 0, 0, 1, 1, 1]
device occ_grid = OccupancyGrid(roi_aabb=roi_aabb, resolution=128).to(device)
)
occ_grid.every_n_step(0, occ_eval_fn, occ_thre=0.1) occ_grid.every_n_step(0, occ_eval_fn, occ_thre=0.1)
assert occ_grid.roi_aabb.shape == (6,) assert occ_grid.roi_aabb.shape == (6,)
assert occ_grid.binary.shape == (128, 128, 128) assert occ_grid.binary.shape == (128, 128, 128)
......
import pytest
import torch

from nerfacc import ray_aabb_intersect

device = "cuda:0"
batch_size = 32
eps = 1e-6


# Bug fix: `torch.cuda.is_available` (without parens) is a function object and
# always truthy, so the skip never applied on machines without CUDA.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device")
def test_intersection():
    """Rays starting inside the unit AABB: t_min must be clipped to 0, and any
    point sampled within [t_min, t_max] must lie inside the box."""
    rays_o = torch.rand([batch_size, 3], device=device)
    # NOTE(review): rays_d is not normalized here although ray_aabb_intersect
    # documents normalized directions; the assertions below are scale-invariant,
    # but confirm whether normalization is intended.
    rays_d = torch.randn([batch_size, 3], device=device)
    aabb = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.float32, device=device)
    t_min, t_max = ray_aabb_intersect(rays_o, rays_d, aabb)
    # Origins are drawn inside the box, so the entry distance is clipped to 0.
    assert (t_min == 0).all()
    # Sample a random point along each ray between entry and exit.
    t = torch.rand_like(t_min) * (t_max - t_min) + t_min
    x = rays_o + t.unsqueeze(-1) * rays_d
    assert (x >= 0).all() and (x <= 1).all()


if __name__ == "__main__":
    test_intersection()
import pytest
import torch

from nerfacc import unpack_to_ray_indices

device = "cuda:0"
batch_size = 32
eps = 1e-6


# Bug fix: `torch.cuda.is_available` (without parens) is a function object and
# always truthy, so the skip never applied on machines without CUDA.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device")
def test_unpack_info():
    """packed_info rows {start, steps} must expand to one ray id per sample;
    rays with zero steps contribute nothing."""
    packed_info = torch.tensor(
        [[0, 1], [1, 0], [1, 4]], dtype=torch.int32, device=device
    )
    # Ray 0 -> 1 sample, ray 1 -> 0 samples, ray 2 -> 4 samples.
    ray_indices_tgt = torch.tensor(
        [0, 2, 2, 2, 2], dtype=torch.int64, device=device
    )
    ray_indices = unpack_to_ray_indices(packed_info)
    assert torch.allclose(ray_indices, ray_indices_tgt)


if __name__ == "__main__":
    test_unpack_info()
import pytest import pytest
import torch import torch
from nerfacc.grid import OccupancyGrid from nerfacc import OccupancyGrid, ray_marching, unpack_to_ray_indices
from nerfacc.ray_marching import ray_marching, unpack_to_ray_indices
device = "cuda:0" device = "cuda:0"
batch_size = 128 batch_size = 128
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment