Unverified Commit d16d5b74 authored by Ruilong Li(李瑞龙)'s avatar Ruilong Li(李瑞龙) Committed by GitHub
Browse files

tests (#20)

* typehints into description

* speed up occ field by 20%

* tests

* fix docs

* backward in tests

* cleanup volumetric rendering func
parent 1d38afa1
import math import math
import time import time
import imageio
import numpy as np import numpy as np
import torch import torch
import torch.nn.functional as F import torch.nn.functional as F
...@@ -33,12 +32,26 @@ def render_image(radiance_field, rays, render_bkgd, render_step_size): ...@@ -33,12 +32,26 @@ def render_image(radiance_field, rays, render_bkgd, render_step_size):
rays = namedtuple_map(lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays) rays = namedtuple_map(lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays)
else: else:
num_rays, _ = rays_shape num_rays, _ = rays_shape
def sigma_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
positions = (
frustum_origins + frustum_dirs * (frustum_starts + frustum_ends) / 2.0
)
return radiance_field.query_density(positions)
def sigma_rgb_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
positions = (
frustum_origins + frustum_dirs * (frustum_starts + frustum_ends) / 2.0
)
return radiance_field(positions, frustum_dirs)
results = [] results = []
chunk = torch.iinfo(torch.int32).max if radiance_field.training else 81920 chunk = torch.iinfo(torch.int32).max if radiance_field.training else 81920
for i in range(0, num_rays, chunk): for i in range(0, num_rays, chunk):
chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)
chunk_results = volumetric_rendering( chunk_results = volumetric_rendering(
query_fn=radiance_field.forward, # {x, dir} -> {rgb, density} sigma_fn=sigma_fn,
sigma_rgb_fn=sigma_rgb_fn,
rays_o=chunk_rays.origins, rays_o=chunk_rays.origins,
rays_d=chunk_rays.viewdirs, rays_d=chunk_rays.viewdirs,
scene_aabb=occ_field.aabb, scene_aabb=occ_field.aabb,
...@@ -46,19 +59,19 @@ def render_image(radiance_field, rays, render_bkgd, render_step_size): ...@@ -46,19 +59,19 @@ def render_image(radiance_field, rays, render_bkgd, render_step_size):
scene_resolution=occ_field.resolution, scene_resolution=occ_field.resolution,
render_bkgd=render_bkgd, render_bkgd=render_bkgd,
render_step_size=render_step_size, render_step_size=render_step_size,
near_plane=0.0,
stratified=radiance_field.training, stratified=radiance_field.training,
) )
results.append(chunk_results) results.append(chunk_results)
rgb, depth, acc, counter, compact_counter = [ colors, opacities, n_marching_samples, n_rendering_samples = [
torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r
for r in zip(*results) for r in zip(*results)
] ]
return ( return (
rgb.view((*rays_shape[:-1], -1)), colors.view((*rays_shape[:-1], -1)),
depth.view((*rays_shape[:-1], -1)), opacities.view((*rays_shape[:-1], -1)),
acc.view((*rays_shape[:-1], -1)), sum(n_marching_samples),
sum(counter), sum(n_rendering_samples),
sum(compact_counter),
) )
...@@ -66,7 +79,7 @@ if __name__ == "__main__": ...@@ -66,7 +79,7 @@ if __name__ == "__main__":
torch.manual_seed(42) torch.manual_seed(42)
device = "cuda:0" device = "cuda:0"
scene = "hotdog" scene = "lego"
# setup dataset # setup dataset
train_dataset = SubjectLoader( train_dataset = SubjectLoader(
...@@ -172,7 +185,7 @@ if __name__ == "__main__": ...@@ -172,7 +185,7 @@ if __name__ == "__main__":
# update occupancy grid # update occupancy grid
occ_field.every_n_step(step) occ_field.every_n_step(step)
rgb, depth, acc, counter, compact_counter = render_image( rgb, acc, counter, compact_counter = render_image(
radiance_field, rays, render_bkgd, render_step_size radiance_field, rays, render_bkgd, render_step_size
) )
num_rays = len(pixels) num_rays = len(pixels)
...@@ -214,7 +227,7 @@ if __name__ == "__main__": ...@@ -214,7 +227,7 @@ if __name__ == "__main__":
pixels = data["pixels"].to(device) pixels = data["pixels"].to(device)
render_bkgd = data["color_bkgd"].to(device) render_bkgd = data["color_bkgd"].to(device)
# rendering # rendering
rgb, depth, acc, _, _ = render_image( rgb, acc, _, _ = render_image(
radiance_field, rays, render_bkgd, render_step_size radiance_field, rays, render_bkgd, render_step_size
) )
mse = F.mse_loss(rgb, pixels) mse = F.mse_loss(rgb, pixels)
...@@ -222,10 +235,10 @@ if __name__ == "__main__": ...@@ -222,10 +235,10 @@ if __name__ == "__main__":
psnrs.append(psnr.item()) psnrs.append(psnr.item())
psnr_avg = sum(psnrs) / len(psnrs) psnr_avg = sum(psnrs) / len(psnrs)
print(f"evaluation: {psnr_avg=}") print(f"evaluation: {psnr_avg=}")
imageio.imwrite( # imageio.imwrite(
"acc_binary_test.png", # "acc_binary_test.png",
((acc > 0).float().cpu().numpy() * 255).astype(np.uint8), # ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
) # )
psnrs = [] psnrs = []
train_dataset.training = False train_dataset.training = False
...@@ -236,7 +249,7 @@ if __name__ == "__main__": ...@@ -236,7 +249,7 @@ if __name__ == "__main__":
pixels = data["pixels"].to(device) pixels = data["pixels"].to(device)
render_bkgd = data["color_bkgd"].to(device) render_bkgd = data["color_bkgd"].to(device)
# rendering # rendering
rgb, depth, acc, _, _ = render_image( rgb, acc, _, _ = render_image(
radiance_field, rays, render_bkgd, render_step_size radiance_field, rays, render_bkgd, render_step_size
) )
mse = F.mse_loss(rgb, pixels) mse = F.mse_loss(rgb, pixels)
...@@ -244,14 +257,14 @@ if __name__ == "__main__": ...@@ -244,14 +257,14 @@ if __name__ == "__main__":
psnrs.append(psnr.item()) psnrs.append(psnr.item())
psnr_avg = sum(psnrs) / len(psnrs) psnr_avg = sum(psnrs) / len(psnrs)
print(f"evaluation on train: {psnr_avg=}") print(f"evaluation on train: {psnr_avg=}")
imageio.imwrite( # imageio.imwrite(
"acc_binary_train.png", # "acc_binary_train.png",
((acc > 0).float().cpu().numpy() * 255).astype(np.uint8), # ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
) # )
imageio.imwrite( # imageio.imwrite(
"rgb_train.png", # "rgb_train.png",
(rgb.cpu().numpy() * 255).astype(np.uint8), # (rgb.cpu().numpy() * 255).astype(np.uint8),
) # )
train_dataset.training = True train_dataset.training = True
if step == 20_000: if step == 20_000:
......
...@@ -6,7 +6,9 @@ from torch import nn ...@@ -6,7 +6,9 @@ from torch import nn
# from torch_scatter import scatter_max # from torch_scatter import scatter_max
def meshgrid3d(res: List[int], device: Union[torch.device, str] = "cpu") -> torch.Tensor: def meshgrid3d(
res: List[int], device: Union[torch.device, str] = "cpu"
) -> torch.Tensor:
"""Create 3D grid coordinates. """Create 3D grid coordinates.
Args: Args:
...@@ -57,6 +59,7 @@ class OccupancyField(nn.Module): ...@@ -57,6 +59,7 @@ class OccupancyField(nn.Module):
grid_coords: The grid coordinates. It is a tensor of shape (num_cells, num_dim). grid_coords: The grid coordinates. It is a tensor of shape (num_cells, num_dim).
grid_indices: The grid indices. It is a tensor of shape (num_cells,). grid_indices: The grid indices. It is a tensor of shape (num_cells,).
""" """
aabb: torch.Tensor aabb: torch.Tensor
occ_grid: torch.Tensor occ_grid: torch.Tensor
occ_grid_binary: torch.Tensor occ_grid_binary: torch.Tensor
...@@ -96,10 +99,6 @@ class OccupancyField(nn.Module): ...@@ -96,10 +99,6 @@ class OccupancyField(nn.Module):
occ_grid_binary = torch.zeros(self.num_cells, dtype=torch.bool) occ_grid_binary = torch.zeros(self.num_cells, dtype=torch.bool)
self.register_buffer("occ_grid_binary", occ_grid_binary) self.register_buffer("occ_grid_binary", occ_grid_binary)
# Used for thresholding occ_grid
occ_grid_mean = occ_grid.mean()
self.register_buffer("occ_grid_mean", occ_grid_mean)
# Grid coords & indices # Grid coords & indices
grid_coords = meshgrid3d(self.resolution).reshape(self.num_cells, self.num_dim) grid_coords = meshgrid3d(self.resolution).reshape(self.num_cells, self.num_dim)
self.register_buffer("grid_coords", grid_coords) self.register_buffer("grid_coords", grid_coords)
...@@ -142,25 +141,22 @@ class OccupancyField(nn.Module): ...@@ -142,25 +141,22 @@ class OccupancyField(nn.Module):
indices = self._sample_uniform_and_occupied_cells(N) indices = self._sample_uniform_and_occupied_cells(N)
# infer occupancy: density * step_size # infer occupancy: density * step_size
tmp_occ_grid = -torch.ones_like(self.occ_grid)
grid_coords = self.grid_coords[indices] grid_coords = self.grid_coords[indices]
x = ( x = (
grid_coords + torch.rand_like(grid_coords.float()) grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)
) / self.resolution_tensor ) / self.resolution_tensor
bb_min, bb_max = torch.split(self.aabb, [self.num_dim, self.num_dim], dim=0) bb_min, bb_max = torch.split(self.aabb, [self.num_dim, self.num_dim], dim=0)
x = x * (bb_max - bb_min) + bb_min x = x * (bb_max - bb_min) + bb_min
tmp_occ = self.occ_eval_fn(x).squeeze(-1) occ = self.occ_eval_fn(x).squeeze(-1)
tmp_occ_grid[indices] = tmp_occ
# tmp_occ_grid, _ = scatter_max(tmp_occ, indices, dim=0, out=tmp_occ_grid)
# ema update # ema update
ema_mask = (self.occ_grid >= 0) & (tmp_occ_grid >= 0) self.occ_grid[indices] = torch.maximum(self.occ_grid[indices] * ema_decay, occ)
self.occ_grid[ema_mask] = torch.maximum( # suppose to use scatter max but emperically it is almost the same.
self.occ_grid[ema_mask] * ema_decay, tmp_occ_grid[ema_mask] # self.occ_grid, _ = scatter_max(
) # occ, indices, dim=0, out=self.occ_grid * ema_decay
self.occ_grid_mean = self.occ_grid.mean() # )
self.occ_grid_binary = self.occ_grid > torch.clamp( self.occ_grid_binary = self.occ_grid > torch.clamp(
self.occ_grid_mean, max=occ_thre self.occ_grid.mean(), max=occ_thre
) )
@torch.no_grad() @torch.no_grad()
......
from typing import Tuple, Optional, List from typing import List, Optional, Tuple
import torch import torch
from torch import Tensor from torch import Tensor
...@@ -46,7 +46,7 @@ def volumetric_marching( ...@@ -46,7 +46,7 @@ def volumetric_marching(
t_max: Optional[Tensor] = None, t_max: Optional[Tensor] = None,
render_step_size: float = 1e-3, render_step_size: float = 1e-3,
near_plane: float = 0.0, near_plane: float = 0.0,
stratified: bool = False stratified: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Volumetric marching with occupancy test. """Volumetric marching with occupancy test.
...@@ -142,8 +142,8 @@ def volumetric_rendering_steps( ...@@ -142,8 +142,8 @@ def volumetric_rendering_steps(
Args: Args:
packed_info: Stores infomation on which samples belong to the same ray. \ packed_info: Stores infomation on which samples belong to the same ray. \
See volumetric_marching for details. Tensor with shape (n_rays, 2). \ See volumetric_marching for details. Tensor with shape (n_rays, 2).
sigmas: Densities at those samples. Tensor with shape (n_samples, 1). sigmas: Densities at those samples. Tensor with shape (n_samples, 1).
frustum_starts: Where the frustum-shape sample starts along a ray. Tensor with \ frustum_starts: Where the frustum-shape sample starts along a ray. Tensor with \
shape (n_samples, 1). shape (n_samples, 1).
frustum_ends: Where the frustum-shape sample ends along a ray. Tensor with \ frustum_ends: Where the frustum-shape sample ends along a ray. Tensor with \
...@@ -152,9 +152,9 @@ def volumetric_rendering_steps( ...@@ -152,9 +152,9 @@ def volumetric_rendering_steps(
Returns: Returns:
A tuple of tensors containing A tuple of tensors containing
**compact_packed_info**: Compacted version of input packed_info. - **compact_packed_info**: Compacted version of input packed_info.
**compact_frustum_starts**: Compacted version of input frustum_starts. - **compact_frustum_starts**: Compacted version of input frustum_starts.
**compact_frustum_ends**: Compacted version of input frustum_ends. - **compact_frustum_ends**: Compacted version of input frustum_ends.
""" """
if ( if (
......
from typing import Callable, Tuple, List from typing import Callable, List, Tuple
import torch import torch
...@@ -11,18 +11,19 @@ from .utils import ( ...@@ -11,18 +11,19 @@ from .utils import (
def volumetric_rendering( def volumetric_rendering(
query_fn: Callable, sigma_fn: Callable,
sigma_rgb_fn: Callable,
rays_o: torch.Tensor, rays_o: torch.Tensor,
rays_d: torch.Tensor, rays_d: torch.Tensor,
scene_aabb: torch.Tensor, scene_aabb: torch.Tensor,
scene_occ_binary: torch.Tensor,
scene_resolution: List[int], scene_resolution: List[int],
scene_occ_binary: torch.Tensor,
render_bkgd: torch.Tensor, render_bkgd: torch.Tensor,
render_step_size: int, render_step_size: float = 1e-3,
near_plane: float = 0.0, near_plane: float = 0.0,
stratified: bool = False, stratified: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int]: ) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
"""A *fast* version of differentiable volumetric rendering.""" """Differentiable volumetric rendering."""
n_rays = rays_o.shape[0] n_rays = rays_o.shape[0]
rays_o = rays_o.contiguous() rays_o = rays_o.contiguous()
...@@ -31,8 +32,8 @@ def volumetric_rendering( ...@@ -31,8 +32,8 @@ def volumetric_rendering(
scene_occ_binary = scene_occ_binary.contiguous() scene_occ_binary = scene_occ_binary.contiguous()
render_bkgd = render_bkgd.contiguous() render_bkgd = render_bkgd.contiguous()
# get packed samples from ray marching & occupancy check.
with torch.no_grad(): with torch.no_grad():
# Ray marching and occupancy check.
( (
packed_info, packed_info,
frustum_origins, frustum_origins,
...@@ -40,77 +41,71 @@ def volumetric_rendering( ...@@ -40,77 +41,71 @@ def volumetric_rendering(
frustum_starts, frustum_starts,
frustum_ends, frustum_ends,
) = volumetric_marching( ) = volumetric_marching(
# rays
rays_o, rays_o,
rays_d, rays_d,
# density grid
aabb=scene_aabb, aabb=scene_aabb,
scene_resolution=scene_resolution, scene_resolution=scene_resolution,
scene_occ_binary=scene_occ_binary, scene_occ_binary=scene_occ_binary,
# sampling
render_step_size=render_step_size, render_step_size=render_step_size,
# optional settings
near_plane=near_plane, near_plane=near_plane,
stratified=stratified, stratified=stratified,
) )
frustum_positions = ( n_marching_samples = frustum_starts.shape[0]
frustum_origins + frustum_dirs * (frustum_starts + frustum_ends) / 2.0
# Query sigma without gradients
sigmas = sigma_fn(
frustum_origins,
frustum_dirs,
frustum_starts,
frustum_ends,
) )
steps_counter = frustum_origins.shape[0]
# compat the samples thru volumetric rendering # Ray marching and rendering check.
with torch.no_grad():
densities = query_fn(frustum_positions, frustum_dirs, only_density=True)
( (
compact_packed_info, packed_info,
compact_frustum_starts, frustum_starts,
compact_frustum_ends, frustum_ends,
compact_frustum_positions, frustum_origins,
compact_frustum_dirs, frustum_dirs,
) = volumetric_rendering_steps( ) = volumetric_rendering_steps(
packed_info, packed_info,
densities, sigmas,
frustum_starts, frustum_starts,
frustum_ends, frustum_ends,
frustum_positions, frustum_origins,
frustum_dirs, frustum_dirs,
) )
compact_steps_counter = compact_frustum_positions.shape[0] n_rendering_samples = frustum_starts.shape[0]
# network # Query sigma and color with gradients
compact_query_results = query_fn(compact_frustum_positions, compact_frustum_dirs) rgbs, sigmas = sigma_rgb_fn(
compact_rgbs, compact_densities = compact_query_results[0], compact_query_results[1] frustum_origins,
frustum_dirs,
# accumulation frustum_starts,
compact_weights, compact_ray_indices = volumetric_rendering_weights( frustum_ends,
compact_packed_info,
compact_densities,
compact_frustum_starts,
compact_frustum_ends,
) )
accumulated_color = volumetric_rendering_accumulate( assert rgbs.shape[-1] == 3, "rgbs must have 3 channels"
compact_weights, compact_ray_indices, compact_rgbs, n_rays assert sigmas.shape[-1] == 1, "sigmas must have 1 channel"
# Rendering: compute weights and ray indices.
weights, ray_indices = volumetric_rendering_weights(
packed_info, sigmas, frustum_starts, frustum_ends
) )
accumulated_weight = volumetric_rendering_accumulate(
compact_weights, compact_ray_indices, None, n_rays # Rendering: accumulate rgbs and opacities along the rays.
colors = volumetric_rendering_accumulate(
weights, ray_indices, values=rgbs, n_rays=n_rays
) )
accumulated_depth = volumetric_rendering_accumulate( opacities = volumetric_rendering_accumulate(
compact_weights, weights, ray_indices, values=None, n_rays=n_rays
compact_ray_indices,
(compact_frustum_starts + compact_frustum_ends) / 2.0,
n_rays,
) )
# TODO: use transmittance to compose bkgd color: # depths = volumetric_rendering_accumulate(
# https://github.com/NVlabs/instant-ngp/blob/14d6ba6fa899e9f069d2f65d33dbe3cd43056ddd/src/testbed_nerf.cu#L1400 # weights,
# ray_indices,
# values=(frustum_starts + frustum_ends) / 2.0,
# n_rays=n_rays,
# )
# accumulated_color = linear_to_srgb(accumulated_color) colors = colors + render_bkgd * (1.0 - opacities)
accumulated_color = accumulated_color + render_bkgd * (1.0 - accumulated_weight)
# accumulated_color = srgb_to_linear(accumulated_color)
return ( return colors, opacities, n_marching_samples, n_rendering_samples
accumulated_color,
accumulated_depth,
accumulated_weight,
steps_counter,
compact_steps_counter,
)
...@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" ...@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "nerfacc" name = "nerfacc"
version = "0.0.6" version = "0.0.7"
authors = [{name = "Ruilong", email = "ruilongli94@gmail.com"}] authors = [{name = "Ruilong", email = "ruilongli94@gmail.com"}]
license = { text="MIT" } license = { text="MIT" }
requires-python = ">=3.8" requires-python = ">=3.8"
...@@ -24,4 +24,5 @@ dependencies = [ ...@@ -24,4 +24,5 @@ dependencies = [
dev = [ dev = [
"black", "black",
"isort", "isort",
"pytest",
] ]
\ No newline at end of file
import torch
import tqdm
from nerfacc import volumetric_rendering
# Device for all tests in this file; assumes an NVIDIA GPU is available.
device = "cuda:0"
def sigma_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
    """Dummy density callback: one uniform-random sigma per sample.

    Matches the callback signature expected by ``volumetric_rendering``;
    only the leading dimension, dtype, and device of ``frustum_ends`` are
    used. Returns a tensor of shape (n_samples, 1).
    """
    n_samples = frustum_ends.shape[0]
    return torch.rand(
        (n_samples, 1), dtype=frustum_ends.dtype, device=frustum_ends.device
    )
def sigma_rgb_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
    """Dummy appearance callback returning ``(rgbs, sigmas)``.

    ``volumetric_rendering`` unpacks the result as ``rgbs, sigmas`` and
    asserts that rgbs has 3 channels and sigmas has 1.  The original body
    returned the pair in the wrong order and built the color tensor via
    ``frustum_ends[:, :3]`` — but ``frustum_ends`` is (n_samples, 1), so
    that slice is still only 1 column wide.  Build the (n_samples, 3)
    color tensor explicitly instead.
    """
    sigmas = torch.rand_like(frustum_ends[:, :1])
    rgbs = torch.rand(
        (frustum_ends.shape[0], 3),
        dtype=frustum_ends.dtype,
        device=frustum_ends.device,
    )
    return rgbs, sigmas
def test_rendering():
    """Benchmark/smoke loop: render random rays through a fully-occupied grid.

    Uses the dummy sigma/rgb callbacks defined above, so this exercises only
    the rendering machinery (marching + compositing), not a real field.
    """
    resolution = [128, 128, 128]
    n_cells = resolution[0] * resolution[1] * resolution[2]
    aabb = torch.tensor([0, 0, 0, 1, 1, 1], device=device).float()
    occ_binary = torch.ones(n_cells, device=device).bool()
    origins = torch.rand((10000, 3), device=device)
    dirs = torch.randn((10000, 3), device=device)
    dirs = dirs / dirs.norm(dim=-1, keepdim=True)  # unit-length view dirs
    bkgd = torch.ones(3, device=device)  # white background
    for _ in tqdm.tqdm(range(1000)):
        volumetric_rendering(
            sigma_fn,
            sigma_rgb_fn,
            origins,
            dirs,
            aabb,
            resolution,
            occ_binary,
            bkgd,
            render_step_size=1e-3,
            near_plane=0.0,
            stratified=False,
        )


if __name__ == "__main__":
    test_rendering()
import torch
import tqdm
from nerfacc import volumetric_marching
# Device for all tests in this file; assumes an NVIDIA GPU is available.
device = "cuda:0"
def test_marching():
    """Benchmark/smoke loop: ray-march random rays through a fully-occupied grid."""
    resolution = [128, 128, 128]
    n_cells = resolution[0] * resolution[1] * resolution[2]
    aabb = torch.tensor([0, 0, 0, 1, 1, 1], device=device).float()
    occ_binary = torch.ones(n_cells, device=device).bool()
    origins = torch.rand((10000, 3), device=device)
    dirs = torch.randn((10000, 3), device=device)
    dirs = dirs / dirs.norm(dim=-1, keepdim=True)  # unit-length view dirs
    for _ in tqdm.tqdm(range(5000)):
        volumetric_marching(
            origins,
            dirs,
            aabb=aabb,
            scene_resolution=resolution,
            scene_occ_binary=occ_binary,
        )


if __name__ == "__main__":
    test_marching()
import torch
import tqdm
from nerfacc import OccupancyField
# Device for all tests in this file; assumes an NVIDIA GPU is available.
device = "cuda:0"
def occ_eval_fn(positions: torch.Tensor) -> torch.Tensor:
    """Dummy occupancy evaluator: one uniform-random value per position.

    Returns a (n_positions, 1) tensor on the same device/dtype as the input.
    """
    n_positions = positions.shape[0]
    return torch.rand(
        (n_positions, 1), dtype=positions.dtype, device=positions.device
    )
def test_occ_field():
    """Smoke loop: repeatedly run occupancy-grid updates with a dummy evaluator."""
    field = OccupancyField(occ_eval_fn, aabb=[0, 0, 0, 1, 1, 1]).to(device)
    for step in tqdm.tqdm(range(50000)):
        field.every_n_step(step, occ_thre=0.1)


if __name__ == "__main__":
    test_occ_field()
import torch
import tqdm
from nerfacc import (
volumetric_marching,
volumetric_rendering_accumulate,
volumetric_rendering_steps,
volumetric_rendering_weights,
)
# Device for all tests in this file; assumes an NVIDIA GPU is available.
device = "cuda:0"
def test_rendering():
    """End-to-end smoke test of the low-level rendering pipeline.

    Chains volumetric_marching -> volumetric_rendering_steps ->
    volumetric_rendering_weights -> volumetric_rendering_accumulate on a
    fully-occupied grid, then checks that gradients flow through the
    accumulation via ``.backward()``.
    """
    scene_aabb = torch.tensor([0, 0, 0, 1, 1, 1], device=device).float()
    scene_occ_binary = torch.ones((128 * 128 * 128), device=device).bool()
    rays_o = torch.rand((10000, 3), device=device)
    rays_d = torch.randn((10000, 3), device=device)
    # Normalize directions to unit length.
    rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
    for step in tqdm.tqdm(range(1000)):
        # March rays through the occupancy grid to get packed per-sample frusta.
        (
            packed_info,
            frustum_origins,
            frustum_dirs,
            frustum_starts,
            frustum_ends,
        ) = volumetric_marching(
            rays_o,
            rays_d,
            aabb=scene_aabb,
            scene_resolution=[128, 128, 128],
            scene_occ_binary=scene_occ_binary,
        )
        # Random densities, scaled by 100 so rays saturate quickly; the
        # requires_grad flag lets backward() reach this tensor.
        sigmas = torch.rand_like(frustum_ends[:, :1], requires_grad=True) * 100
        # Compact away samples that do not contribute to rendering.
        (
            packed_info,
            frustum_starts,
            frustum_ends,
            frustum_origins,
            frustum_dirs,
        ) = volumetric_rendering_steps(
            packed_info,
            sigmas,
            frustum_starts,
            frustum_ends,
            frustum_origins,
            frustum_dirs,
        )
        # NOTE(review): `sigmas` is NOT compacted above, yet it is passed
        # below together with the compacted packed_info/starts/ends —
        # confirm volumetric_rendering_weights tolerates the length
        # mismatch when samples actually get dropped. Same applies to
        # `values` further down, which is sized like the uncompacted sigmas.
        weights, ray_indices = volumetric_rendering_weights(
            packed_info,
            sigmas,
            frustum_starts,
            frustum_ends,
        )
        values = torch.rand_like(sigmas, requires_grad=True)
        # Accumulate per-sample values into per-ray outputs, then check
        # that backward() runs through the accumulation kernel.
        accum_values = volumetric_rendering_accumulate(
            weights,
            ray_indices,
            values,
            n_rays=rays_o.shape[0],
        )
        accum_values.sum().backward()


if __name__ == "__main__":
    test_rendering()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment