Unverified Commit 601c4c34 authored by Ruilong Li(李瑞龙)'s avatar Ruilong Li(李瑞龙) Committed by GitHub
Browse files

Add new Utils: pack & unpack data; cdf sampling; query grid (#57)

* new utils

* proper test for pack

* add test por cdf and query occ

* add deprecated warning

* bump version

* fix list return to tuple
parent 40075646
import pytest
import torch
from nerfacc import OccupancyGrid, ray_marching, unpack_info
device = "cuda:0"
batch_size = 128
@@ -39,7 +39,7 @@ def test_marching_with_grid():
        far_plane=1.0,
        render_step_size=1e-2,
    )
    ray_indices = unpack_info(packed_info).long()
    samples = (
        rays_o[ray_indices] + rays_d[ray_indices] * (t_starts + t_ends) / 2.0
    )
...
import pytest
import torch
from nerfacc import ray_marching, ray_resampling
device = "cuda:0"
batch_size = 128
# BUG FIX: `torch.cuda.is_available` is a function; without calling it, the
# function object is always truthy, so `not torch.cuda.is_available` is always
# False and the skip never fires (the test would crash on CUDA-less machines).
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device")
def test_resampling():
    """Smoke-test ray_resampling: resampled intervals have the expected shape.

    Marches `batch_size` random rays, assigns random weights to the resulting
    samples, resamples a fixed `n_samples=32` per ray, and checks that the
    resampled interval tensors are shaped (batch_size * 32, 1).
    """
    # Random ray origins and unit-norm directions on the CUDA device.
    rays_o = torch.rand((batch_size, 3), device=device)
    rays_d = torch.randn((batch_size, 3), device=device)
    rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
    packed_info, t_starts, t_ends = ray_marching(
        rays_o,
        rays_d,
        near_plane=0.1,
        far_plane=1.0,
        render_step_size=1e-3,
    )
    # One random (unnormalized) weight per marched sample; ray_resampling
    # draws new intervals according to these weights.
    weights = torch.rand((t_starts.shape[0],), device=device)
    packed_info, t_starts, t_ends = ray_resampling(
        packed_info, t_starts, t_ends, weights, n_samples=32
    )
    # Exactly 32 resampled intervals per ray, flattened across the batch.
    assert t_starts.shape == t_ends.shape == (batch_size * 32, 1)
# Allow running this file directly as a script (outside of pytest).
if __name__ == "__main__":
    test_resampling()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment