trainval.py

import argparse
import math
import time

import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from datasets.nerf_synthetic import SubjectLoader, namedtuple_map
from radiance_fields.mlp import VanillaNeRFRadianceField
from radiance_fields.ngp import NGPradianceField

from nerfacc import OccupancyField, volumetric_rendering

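# Target number of samples rendered per training step (1 << 16 = 65536); the
# per-step ray batch size is adapted during training to roughly hit this budget.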
TARGET_SAMPLE_BATCH_SIZE = 1 << 16


def render_image(
    radiance_field, rays, render_bkgd, render_step_size, test_chunk_size=81920
):
    """Render the pixels of an image.

    Args:
      radiance_field: the radiance field of nerf.
      rays: a `Rays` namedtuple, the rays to be rendered.

    Returns:
      rgb: torch.tensor, rendered color image.
      depth: torch.tensor, rendered depth image.
      acc: torch.tensor, rendered accumulated weights per pixel.
    """
    rays_shape = rays.origins.shape
    if len(rays_shape) == 3:
        height, width, _ = rays_shape
        num_rays = height * width
        rays = namedtuple_map(lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays)
    else:
        num_rays, _ = rays_shape

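    # Callbacks passed to nerfacc's volumetric_rendering: both evaluate the field
    # at the midpoint of each sample segment along the rays.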
    def sigma_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
        positions = (
            frustum_origins + frustum_dirs * (frustum_starts + frustum_ends) / 2.0
        )
        return radiance_field.query_density(positions)

    def sigma_rgb_fn(frustum_origins, frustum_dirs, frustum_starts, frustum_ends):
        positions = (
            frustum_origins + frustum_dirs * (frustum_starts + frustum_ends) / 2.0
        )
        return radiance_field(positions, frustum_dirs)

    results = []
    # render all rays at once during training; chunk at test time to bound memory.
    chunk = torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size
    for i in range(0, num_rays, chunk):
        chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)
        chunk_results = volumetric_rendering(
            sigma_fn=sigma_fn,
            sigma_rgb_fn=sigma_rgb_fn,
            rays_o=chunk_rays.origins,
            rays_d=chunk_rays.viewdirs,
            scene_aabb=occ_field.aabb,
            scene_occ_binary=occ_field.occ_grid_binary,
            scene_resolution=occ_field.resolution,
            render_bkgd=render_bkgd,
            render_step_size=render_step_size,
            near_plane=0.0,
            stratified=radiance_field.training,
        )
        results.append(chunk_results)
    colors, opacities, n_marching_samples, n_rendering_samples = [
        torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r
        for r in zip(*results)
    ]
    return (
        colors.view((*rays_shape[:-1], -1)),
        opacities.view((*rays_shape[:-1], -1)),
        sum(n_marching_samples),
        sum(n_rendering_samples),
    )


if __name__ == "__main__":
    torch.manual_seed(42)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "method",
        type=str,
        default="ngp",
        choices=["ngp", "vanilla"],
        help="which nerf to use",
    )
    parser.add_argument(
        "--train_split",
        type=str,
        default="trainval",
        choices=["train", "trainval"],
        help="which train split to use",
    )
    parser.add_argument(
        "--scene",
        type=str,
        default="lego",
        choices=[
            "chair",
            "drums",
            "ficus",
            "hotdog",
            "lego",
            "materials",
            "mic",
            "ship",
        ],
        help="which scene to use",
    )
    parser.add_argument(
        "--test_chunk_size",
        type=int,
        default=81920,
        help="number of rays per chunk when rendering test images",
    )
    args = parser.parse_args()

    device = "cuda:0"
    scene = args.scene

    # setup the scene bounding box.
    scene_aabb = torch.tensor([-1.5, -1.5, -1.5, 1.5, 1.5, 1.5])
    # setup some rendering settings: the step size is chosen so that roughly
    # render_n_samples steps cover the diagonal of the aabb.
    render_n_samples = 1024
    render_step_size = (
        (scene_aabb[3:] - scene_aabb[:3]).max() * math.sqrt(3) / render_n_samples
    ).item()

    # setup dataset
    train_dataset = SubjectLoader(
        subject_id=scene,
        root_fp="/home/ruilongli/data/nerf_synthetic/",
        split=args.train_split,
        num_rays=TARGET_SAMPLE_BATCH_SIZE // render_n_samples,
        # color_bkgd_aug="random",
    )

    train_dataset.images = train_dataset.images.to(device)
    train_dataset.camtoworlds = train_dataset.camtoworlds.to(device)
    train_dataset.K = train_dataset.K.to(device)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        num_workers=0,
        batch_size=None,
        # persistent_workers=True,
        shuffle=True,
    )

    test_dataset = SubjectLoader(
        subject_id=scene,
        root_fp="/home/ruilongli/data/nerf_synthetic/",
        split="test",
        num_rays=None,
    )
    test_dataset.images = test_dataset.images.to(device)
    test_dataset.camtoworlds = test_dataset.camtoworlds.to(device)
    test_dataset.K = test_dataset.K.to(device)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        num_workers=0,
        batch_size=None,
    )

    # setup the scene radiance field. Assume you have a NeRF model and
    # it has the following functions:
    # - query_density(): {x} -> {density}
    # - forward(): {x, dirs} -> {rgb, density}
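    # For reference, a minimal sketch of the interface assumed here (the class
    # name and the constant-density / sigmoid-color behaviour are illustrative
    # placeholders, not part of this repo):
    #
    #     class ToyRadianceField(torch.nn.Module):
    #         def query_density(self, x):        # x: (N, 3) positions
    #             return torch.ones_like(x[..., :1])       # (N, 1) densities
    #
    #         def forward(self, x, dirs):        # dirs: (N, 3) view directions
    #             rgb = torch.sigmoid(x)                   # (N, 3) colors
    #             return rgb, self.query_density(x)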
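    # Two training configurations: the NGP hash-grid field uses a larger learning
    # rate, a longer occupancy-grid warmup, and a unit-scale gradient scaler, while
    # the vanilla MLP trains for more steps with a smaller learning rate and a
    # larger initial gradient scale for mixed precision.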
    if args.method == "ngp":
        radiance_field = NGPradianceField(aabb=scene_aabb).to(device)
        optimizer = torch.optim.Adam(radiance_field.parameters(), lr=1e-2, eps=1e-15)
        max_steps = 20000
        occ_field_warmup_steps = 2000
        grad_scaler = torch.cuda.amp.GradScaler(1)

    elif args.method == "vanilla":
        radiance_field = VanillaNeRFRadianceField().to(device)
        optimizer = torch.optim.Adam(radiance_field.parameters(), lr=5e-4)
        max_steps = 40000
        occ_field_warmup_steps = 256
        grad_scaler = torch.cuda.amp.GradScaler(2**10)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[max_steps // 2, max_steps * 3 // 4, max_steps * 9 // 10],
        gamma=0.33,
    )

    # setup occupancy field with eval function
    def occ_eval_fn(x: torch.Tensor) -> torch.Tensor:
        """Evaluate occupancy given positions.

        Args:
            x: positions with shape (N, 3).
        Returns:
            occupancy values with shape (N, 1).
        """
        density_after_activation = radiance_field.query_density(x)
        # The exact occupancy would be
        #   occupancy = 1.0 - torch.exp(-density_after_activation * render_step_size)
        # but for small density * step_size this first-order approximation is
        # nearly identical and cheaper to evaluate.
        occupancy = density_after_activation * render_step_size
        return occupancy

    occ_field = OccupancyField(
        occ_eval_fn=occ_eval_fn, aabb=scene_aabb, resolution=128
    ).to(device)
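    # The occupancy field keeps a binary grid at resolution 128^3 over the scene
    # aabb; it is refreshed via `occ_field.every_n_step(...)` during training and
    # consumed by `volumetric_rendering` to skip empty space.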

    # training
    step = 0
    tic = time.time()
    data_time = 0
    tic_data = time.time()

    for epoch in range(10000000):  # effectively loops forever; exits once step reaches max_steps
        for i in range(len(train_dataset)):
            radiance_field.train()
            data = train_dataset[i]
            data_time += time.time() - tic_data

            # generate rays from data and the gt pixel color
            # rays = namedtuple_map(lambda x: x.to(device), data["rays"])
            # pixels = data["pixels"].to(device)
            render_bkgd = data["color_bkgd"]
            rays = data["rays"]
            pixels = data["pixels"]

            # update occupancy grid
            occ_field.every_n_step(step, warmup_steps=occ_field_warmup_steps)

            rgb, acc, counter, compact_counter = render_image(
                radiance_field, rays, render_bkgd, render_step_size
            )
            # dynamically adjust the ray batch size so that the next step renders
            # roughly TARGET_SAMPLE_BATCH_SIZE samples.
            num_rays = len(pixels)
            num_rays = int(
                num_rays * (TARGET_SAMPLE_BATCH_SIZE / float(compact_counter))
            )
            train_dataset.update_num_rays(num_rays)
            alive_ray_mask = acc.squeeze(-1) > 0  # rays that accumulated any opacity

            # compute loss only on rays that hit occupied space
            loss = F.smooth_l1_loss(rgb[alive_ray_mask], pixels[alive_ray_mask])

            optimizer.zero_grad()
            # do not unscale it because we are using Adam.
            grad_scaler.scale(loss).backward()
            optimizer.step()
            scheduler.step()

            if step % 100 == 0:
                elapsed_time = time.time() - tic
                loss = F.mse_loss(rgb[alive_ray_mask], pixels[alive_ray_mask])
                print(
                    f"elapsed_time={elapsed_time:.2f}s (data={data_time:.2f}s) | {step=} | "
                    f"loss={loss:.5f} | "
                    f"alive_ray_mask={alive_ray_mask.long().sum():d} | "
                    f"counter={counter:d} | compact_counter={compact_counter:d} | num_rays={len(pixels):d} |"
                )

            # if time.time() - tic > 300:
            if step >= max_steps and step % max_steps == 0 and step > 0:
                # evaluation
                radiance_field.eval()

                psnrs = []
                with torch.no_grad():
                    for data in tqdm.tqdm(test_dataloader):
                        # generate rays from data and the gt pixel color
                        rays = namedtuple_map(lambda x: x.to(device), data["rays"])
                        pixels = data["pixels"].to(device)
                        render_bkgd = data["color_bkgd"].to(device)
                        # rendering
                        rgb, acc, _, _ = render_image(
                            radiance_field,
                            rays,
                            render_bkgd,
                            render_step_size,
                            test_chunk_size=args.test_chunk_size,
                        )
                        mse = F.mse_loss(rgb, pixels)
                        # PSNR = -10 * log10(MSE) for pixel values in [0, 1]
                        psnr = -10.0 * torch.log(mse) / np.log(10.0)
                        psnrs.append(psnr.item())
                psnr_avg = sum(psnrs) / len(psnrs)
                print(f"evaluation: {psnr_avg=}")
                # imageio.imwrite(
                #     "acc_binary_test.png",
                #     ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
                # )

                # psnrs = []
                # train_dataset.training = False
                # with torch.no_grad():
                #     for data in tqdm.tqdm(train_dataloader):
                #         # generate rays from data and the gt pixel color
                #         rays = namedtuple_map(lambda x: x.to(device), data["rays"])
                #         pixels = data["pixels"].to(device)
                #         render_bkgd = data["color_bkgd"].to(device)
                #         # rendering
                #         rgb, acc, _, _ = render_image(
                #             radiance_field, rays, render_bkgd, render_step_size
                #         )
                #         mse = F.mse_loss(rgb, pixels)
                #         psnr = -10.0 * torch.log(mse) / np.log(10.0)
                #         psnrs.append(psnr.item())
                # psnr_avg = sum(psnrs) / len(psnrs)
                # print(f"evaluation on train: {psnr_avg=}")
                # imageio.imwrite(
                #     "acc_binary_train.png",
                #     ((acc > 0).float().cpu().numpy() * 255).astype(np.uint8),
                # )
                # imageio.imwrite(
                #     "rgb_train.png",
                #     (rgb.cpu().numpy() * 255).astype(np.uint8),
                # )
                train_dataset.training = True

            if step == max_steps:
                print("training stops")
                exit()
            tic_data = time.time()

            step += 1