Usage
=====

.. _installation:

Installation
------------

To use nerfacc, first install it using pip:

.. code-block:: console

   (.venv) $ pip install git+https://github.com/liruilong940607/nerfacc
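
If the install succeeded, the package should import cleanly. A quick, optional sanity check:

.. code-block:: console

   (.venv) $ python -c "import nerfacc"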

Example usage
-------------

.. code-block:: python

    import math

    import torch
    import torch.nn.functional as F

    from nerfacc import OccupancyField, volumetric_rendering

    # setup the scene bounding box.
    scene_aabb = torch.tensor([-1.5, -1.5, -1.5, 1.5, 1.5, 1.5]).cuda()

    # setup the scene radiance field. Assume you have a NeRF model that
    # exposes the following methods:
    # - query_density(): {x} -> {density}
    # - forward(): {x, dirs} -> {rgb, density}
    radiance_field = ...

    # setup some rendering settings: number of samples per ray, a white
    # background, and a step size chosen so that `render_n_samples` steps
    # span the scene diagonal.
    render_n_samples = 1024
    render_bkgd = torch.ones(3).cuda()
    render_step_size = (
        (scene_aabb[3:] - scene_aabb[:3]).max() * math.sqrt(3) / render_n_samples
    )
    
    # setup occupancy field with eval function
    def occ_eval_fn(x: torch.Tensor) -> torch.Tensor:
        """Evaluate occupancy given positions.

        Args:
            x: positions with shape (N, 3).
        Returns:
            occupancy values with shape (N, 1). 
        """
        density_after_activation = radiance_field.query_density(x)
        occupancy = density_after_activation * render_step_size
        return occupancy
    occ_field = OccupancyField(occ_eval_fn=occ_eval_fn, aabb=scene_aabb, resolution=128)

    # training
    for step in range(10_000):
        # generate rays from data and the gt pixel color
        rays = ...
        pixels = ...

        # update occupancy grid
        occ_field.every_n_step(step)        

        # rendering
        (
            accumulated_color,
            accumulated_depth,
            accumulated_weight,
            _,
        ) = volumetric_rendering(
            query_fn=radiance_field.forward,  # {x, dirs} -> {rgb, density}
            rays_o=rays.origins,
            rays_d=rays.viewdirs,
            scene_aabb=scene_aabb,
            scene_occ_binary=occ_field.occ_grid_binary,
            scene_resolution=occ_field.resolution,
            render_bkgd=render_bkgd,
            render_n_samples=render_n_samples,
            # other kwargs for `query_fn`
            ...,
        )

        # compute loss
        loss = F.mse_loss(accumulated_color, pixels)
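
The snippet above leaves ``radiance_field = ...`` abstract. Below is a minimal, hypothetical stand-in (a toy MLP, not part of nerfacc) that exposes the two methods the example assumes, ``query_density`` and ``forward``; any NeRF variant with the same interface should work.

.. code-block:: python

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class ToyRadianceField(nn.Module):
        """A deliberately tiny NeRF-style MLP matching the assumed interface."""

        def __init__(self, hidden: int = 64) -> None:
            super().__init__()
            # position -> (raw density, geometry features)
            self.density_mlp = nn.Sequential(
                nn.Linear(3, hidden), nn.ReLU(), nn.Linear(hidden, 1 + hidden)
            )
            # (geometry features, view direction) -> rgb
            self.rgb_mlp = nn.Sequential(
                nn.Linear(hidden + 3, hidden), nn.ReLU(), nn.Linear(hidden, 3)
            )

        def query_density(self, x: torch.Tensor) -> torch.Tensor:
            # x: (N, 3) positions -> (N, 1) densities after activation.
            h = self.density_mlp(x)
            return F.softplus(h[..., :1])

        def forward(self, x: torch.Tensor, dirs: torch.Tensor):
            # x, dirs: (N, 3) -> rgb (N, 3), density (N, 1).
            h = self.density_mlp(x)
            density = F.softplus(h[..., :1])
            rgb = torch.sigmoid(self.rgb_mlp(torch.cat([h[..., 1:], dirs], dim=-1)))
            return rgb, density

    radiance_field = ToyRadianceField().cuda()

In a full training script you would also create an optimizer over ``radiance_field.parameters()`` and follow the MSE loss with ``loss.backward()`` and ``optimizer.step()``.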