import math
import time

import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from datasets.nerf_synthetic import SubjectLoader, namedtuple_map
from radiance_fields.ngp import NGPradianceField

from nerfacc import OccupancyField, volumetric_rendering

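# Target number of samples per training batch (2**18 = 262,144). The ray batch
# size is adjusted on the fly during training so that the total number of
# evaluated samples stays close to this value.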
TARGET_SAMPLE_BATCH_SIZE = 1 << 18


def render_image(radiance_field, rays, render_bkgd, render_step_size):
    """Render the pixels of an image.

    Args:
      radiance_field: the radiance field of nerf.
      rays: a `Rays` namedtuple, the rays to be rendered.

    Returns:
      rgb: torch.tensor, rendered color image.
      depth: torch.tensor, rendered depth image.
      acc: torch.tensor, rendered accumulated weights per pixel.
    """
    rays_shape = rays.origins.shape
    if len(rays_shape) == 3:
        height, width, _ = rays_shape
        num_rays = height * width
        rays = namedtuple_map(lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays)
    else:
        num_rays, _ = rays_shape
    results = []
    # Render all rays in a single chunk during training; chunk at evaluation
    # time to bound peak memory.
    chunk = torch.iinfo(torch.int32).max if radiance_field.training else 81920
    for i in range(0, num_rays, chunk):
        chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)
        chunk_results = volumetric_rendering(
            query_fn=radiance_field.forward,  # {x, dir} -> {rgb, density}
            rays_o=chunk_rays.origins,
            rays_d=chunk_rays.viewdirs,
            scene_aabb=occ_field.aabb,
            scene_occ_binary=occ_field.occ_grid_binary,
            scene_resolution=occ_field.resolution,
            render_bkgd=render_bkgd,
            render_step_size=render_step_size,
        )
        results.append(chunk_results)
    rgb, depth, acc, counter, compact_counter = [
        torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r
        for r in zip(*results)
    ]
    return (
        rgb.view((*rays_shape[:-1], -1)),
        depth.view((*rays_shape[:-1], -1)),
        acc.view((*rays_shape[:-1], -1)),
        sum(counter),
        sum(compact_counter),
    )


if __name__ == "__main__":
    torch.manual_seed(42)

    device = "cuda:0"
    scene = "lego"

    # setup dataset
    train_dataset = SubjectLoader(
        subject_id=scene,
        root_fp="/home/ruilongli/data/nerf_synthetic/",
        split="trainval",
        num_rays=1024,
        # color_bkgd_aug="random",
    )

    train_dataset.images = train_dataset.images.to(device)
    train_dataset.camtoworlds = train_dataset.camtoworlds.to(device)
    train_dataset.K = train_dataset.K.to(device)
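    # batch_size=None disables the DataLoader's automatic batching: each item
    # yielded by SubjectLoader is already a complete batch of rays.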
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        num_workers=0,
        batch_size=None,
        # persistent_workers=True,
        shuffle=True,
    )

    test_dataset = SubjectLoader(
        subject_id=scene,
        root_fp="/home/ruilongli/data/nerf_synthetic/",
        split="test",
        num_rays=None,
    )
    test_dataset.images = test_dataset.images.to(device)
    test_dataset.camtoworlds = test_dataset.camtoworlds.to(device)
    test_dataset.K = test_dataset.K.to(device)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        num_workers=0,
        batch_size=None,
    )

    # setup the scene bounding box.
    scene_aabb = torch.tensor([-1.5, -1.5, -1.5, 1.5, 1.5, 1.5])

    # setup the scene radiance field. We assume a NeRF model that exposes the
    # following methods:
    # - query_density(): {x} -> {density}
    # - forward(): {x, dirs} -> {rgb, density}
    radiance_field = NGPradianceField(aabb=scene_aabb).to(device)
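
    # For reference, a minimal (hypothetical) radiance field exposing the same
    # interface could look like the sketch below. It only illustrates the
    # assumed API and is not used anywhere in this script.
    class _ToyRadianceField(torch.nn.Module):
        """Tiny MLP with the {query_density, forward} interface assumed above."""

        def __init__(self):
            super().__init__()
            self.mlp = torch.nn.Sequential(
                torch.nn.Linear(3, 64), torch.nn.ReLU(), torch.nn.Linear(64, 4)
            )

        def query_density(self, x):
            # x: (N, 3) positions -> (N, 1) non-negative densities.
            return F.softplus(self.mlp(x)[:, 3:])

        def forward(self, x, dirs):
            # x: (N, 3) positions, dirs: (N, 3) view directions (unused here).
            out = self.mlp(x)
            return torch.sigmoid(out[:, :3]), F.softplus(out[:, 3:])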

    # setup some rendering settings. The step size is chosen so that
    # `render_n_samples` steps cover the diagonal of the scene AABB
    # (longest edge * sqrt(3)).
    render_n_samples = 1024
    render_step_size = (
        (scene_aabb[3:] - scene_aabb[:3]).max() * math.sqrt(3) / render_n_samples
    ).item()

    optimizer = torch.optim.Adam(
        radiance_field.parameters(),
        lr=1e-2,
        # betas=(0.9, 0.99),
        eps=1e-15,
        # weight_decay=1e-6,
    )
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[10000, 15000, 18000], gamma=0.33
    )

    # setup occupancy field with eval function
    def occ_eval_fn(x: torch.Tensor) -> torch.Tensor:
        """Evaluate occupancy given positions.

        Args:
            x: positions with shape (N, 3).
        Returns:
            occupancy values with shape (N, 1).
        """
        density_after_activation = radiance_field.query_density(x)
        # The two expressions below (the exact per-step opacity
        # 1 - exp(-density * render_step_size) and its first-order approximation
        # density * render_step_size) are nearly identical when the density is
        # small, so the cheaper linear form is used.
        # occupancy = 1.0 - torch.exp(-density_after_activation * render_step_size)
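        # e.g. for density * render_step_size = 0.01:
        #   1 - exp(-0.01) ≈ 0.00995, vs. the linear approximation 0.01.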
        occupancy = density_after_activation * render_step_size
        return occupancy

    occ_field = OccupancyField(
        occ_eval_fn=occ_eval_fn, aabb=scene_aabb, resolution=128
    ).to(device)

    # training
    step = 0
    tic = time.time()
    data_time = 0
    tic_data = time.time()

    # Scale the gradients up by a fixed factor (2**10). They are never unscaled
    # before the optimizer step because Adam's update is invariant to a constant
    # gradient scale.
    grad_scaler = torch.cuda.amp.GradScaler(2**10)
    for epoch in range(10000000):
        for i in range(len(train_dataset)):
            radiance_field.train()
            data = train_dataset[i]
            data_time += time.time() - tic_data

            # generate rays from data and the gt pixel color
            # rays = namedtuple_map(lambda x: x.to(device), data["rays"])
            # pixels = data["pixels"].to(device)
            render_bkgd = data["color_bkgd"]
            rays = data["rays"]
            pixels = data["pixels"]

            # update occupancy grid
            occ_field.every_n_step(step)

            rgb, depth, acc, counter, compact_counter = render_image(
                radiance_field, rays, render_bkgd, render_step_size
            )
            # dynamically adjust the ray batch size so that the number of
            # evaluated samples per batch stays close to TARGET_SAMPLE_BATCH_SIZE.
            num_rays = len(pixels)
            num_rays = int(
                num_rays * (TARGET_SAMPLE_BATCH_SIZE / float(compact_counter))
            )
            train_dataset.update_num_rays(num_rays)
            # only rays that accumulated some opacity (i.e. hit occupied space)
            # contribute to the loss.
            alive_ray_mask = acc.squeeze(-1) > 0

            # compute loss
            loss = F.smooth_l1_loss(rgb[alive_ray_mask], pixels[alive_ray_mask])

            optimizer.zero_grad()
            # do not unscale it because we are using Adam.
            grad_scaler.scale(loss).backward()
            optimizer.step()
            scheduler.step()

            if step % 100 == 0:
                elapsed_time = time.time() - tic
                loss = F.mse_loss(rgb[alive_ray_mask], pixels[alive_ray_mask])
                print(
                    f"elapsed_time={elapsed_time:.2f}s (data={data_time:.2f}s) | {step=} | "
                    f"loss={loss:.5f} | "
                    f"alive_ray_mask={alive_ray_mask.long().sum():d} | "
                    f"counter={counter:d} | compact_counter={compact_counter:d} | num_rays={len(pixels):d} |"
                )

            # if time.time() - tic > 300:
            if step >= 20_000 and step % 5000 == 0:
                # evaluation
                radiance_field.eval()
Ruilong Li's avatar
Ruilong Li committed
208
209
210
211
212
213
214
215
                psnrs = []
                with torch.no_grad():
                    for data in tqdm.tqdm(test_dataloader):
                        # generate rays from data and the gt pixel color
                        rays = namedtuple_map(lambda x: x.to(device), data["rays"])
                        pixels = data["pixels"].to(device)
                        render_bkgd = data["color_bkgd"].to(device)
                        # rendering
                        rgb, depth, acc, _, _ = render_image(
                            radiance_field, rays, render_bkgd, render_step_size
                        )
                        mse = F.mse_loss(rgb, pixels)
                        psnr = -10.0 * torch.log(mse) / np.log(10.0)
                        psnrs.append(psnr.item())
                psnr_avg = sum(psnrs) / len(psnrs)
                print(f"evaluation: {psnr_avg=}")
                imageio.imwrite(
                    "acc_binary_test.png",
                    ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
                )

                psnrs = []
                train_dataset.training = False
                with torch.no_grad():
                    for data in tqdm.tqdm(train_dataloader):
                        # generate rays from data and the gt pixel color
                        rays = namedtuple_map(lambda x: x.to(device), data["rays"])
                        pixels = data["pixels"].to(device)
                        render_bkgd = data["color_bkgd"].to(device)
                        # rendering
                        rgb, depth, acc, _, _ = render_image(
                            radiance_field, rays, render_bkgd, render_step_size
                        )
                        mse = F.mse_loss(rgb, pixels)
                        psnr = -10.0 * torch.log(mse) / np.log(10.0)
                        psnrs.append(psnr.item())
                psnr_avg = sum(psnrs) / len(psnrs)
                print(f"evaluation on train: {psnr_avg=}")
                imageio.imwrite(
                    "acc_binary_train.png",
                    ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
                )
                imageio.imwrite(
                    "rgb_train.png",
                    (rgb.cpu().numpy() * 255).astype(np.uint8),
                )
                train_dataset.training = True

            if step == 20_000:
                print("training stops")
                exit()
            tic_data = time.time()

            step += 1