import collections
import json
import os

import imageio.v2 as imageio
import numpy as np
import torch
import torch.nn.functional as F

Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))


def namedtuple_map(fn, tup):
    """Apply `fn` to each element of `tup` and cast to `tup`'s namedtuple."""
    return type(tup)(*(None if x is None else fn(x) for x in tup))


def _load_renderings(root_fp: str, subject_id: str, split: str):
    """Load images from disk."""
    if not root_fp.startswith("/"):
        # allow relative path. e.g., "./data/nerf_synthetic/"
        root_fp = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "..",
            "..",
            root_fp,
        )

    data_dir = os.path.join(root_fp, subject_id)
    with open(os.path.join(data_dir, "transforms_{}.json".format(split)), "r") as fp:
        meta = json.load(fp)
    images = []
    camtoworlds = []

    for i in range(len(meta["frames"])):
        frame = meta["frames"][i]
        fname = os.path.join(data_dir, frame["file_path"] + ".png")
        rgba = imageio.imread(fname)
        camtoworlds.append(frame["transform_matrix"])
        images.append(rgba)

    images = np.stack(images, axis=0)
    camtoworlds = np.stack(camtoworlds, axis=0)

    h, w = images.shape[1:3]
    camera_angle_x = float(meta["camera_angle_x"])
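    # Pinhole focal length in pixels, derived from the horizontal field of view:
    # tan(camera_angle_x / 2) = (w / 2) / focal.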
    focal = 0.5 * w / np.tan(0.5 * camera_angle_x)

    return images, camtoworlds, focal


class SubjectLoader(torch.utils.data.Dataset):
    """Single subject data loader for training and evaluation."""

    SPLITS = ["train", "val", "trainval", "test"]
    SUBJECT_IDS = [
        "chair",
        "drums",
        "ficus",
        "hotdog",
        "lego",
        "materials",
        "mic",
        "ship",
    ]

    WIDTH, HEIGHT = 800, 800
    NEAR, FAR = 2.0, 6.0

    def __init__(
        self,
        subject_id: str,
        root_fp: str,
        split: str,
        color_bkgd_aug: str = "white",
        num_rays: int = None,
        near: float = None,
        far: float = None,
        batch_over_images: bool = True,
    ):
        super().__init__()
        assert split in self.SPLITS, "%s" % split
        assert subject_id in self.SUBJECT_IDS, "%s" % subject_id
        assert color_bkgd_aug in ["white", "black", "random"]
        self.split = split
        self.num_rays = num_rays
        self.near = self.NEAR if near is None else near
        self.far = self.FAR if far is None else far
        self.training = (num_rays is not None) and (split in ["train", "trainval"])
        self.color_bkgd_aug = color_bkgd_aug
        self.batch_over_images = batch_over_images
        if split == "trainval":
            _images_train, _camtoworlds_train, _focal_train = _load_renderings(
                root_fp, subject_id, "train"
            )
            _images_val, _camtoworlds_val, _focal_val = _load_renderings(
                root_fp, subject_id, "val"
            )
            self.images = np.concatenate([_images_train, _images_val])
            self.camtoworlds = np.concatenate([_camtoworlds_train, _camtoworlds_val])
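            # The Blender scenes use the same intrinsics for train and val,
            # so the train focal length is reused for the merged split.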
            self.focal = _focal_train
        else:
            self.images, self.camtoworlds, self.focal = _load_renderings(
                root_fp, subject_id, split
            )
        self.images = torch.from_numpy(self.images).to(torch.uint8)
        self.camtoworlds = torch.from_numpy(self.camtoworlds).to(torch.float32)
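        # Pinhole intrinsics; the principal point is assumed to be at the image center.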
        self.K = torch.tensor(
            [
                [self.focal, 0, self.WIDTH / 2.0],
                [0, self.focal, self.HEIGHT / 2.0],
                [0, 0, 1],
            ],
            dtype=torch.float32,
        )  # (3, 3)
        assert self.images.shape[1:3] == (self.HEIGHT, self.WIDTH)

    def __len__(self):
        return len(self.images)

    @torch.no_grad()
    def __getitem__(self, index):
        data = self.fetch_data(index)
        data = self.preprocess(data)
        return data

    def preprocess(self, data):
        """Process the fetched / cached data with randomness."""
        rgba, rays = data["rgba"], data["rays"]
        pixels, alpha = torch.split(rgba, [3, 1], dim=-1)

        if self.training:
            if self.color_bkgd_aug == "random":
                color_bkgd = torch.rand(3, device=self.images.device)
            elif self.color_bkgd_aug == "white":
                color_bkgd = torch.ones(3, device=self.images.device)
            elif self.color_bkgd_aug == "black":
                color_bkgd = torch.zeros(3, device=self.images.device)
        else:
            # just use white during inference
            color_bkgd = torch.ones(3, device=self.images.device)

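        # Composite RGB over the chosen background color using the alpha channel.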
        pixels = pixels * alpha + color_bkgd * (1.0 - alpha)
        return {
            "pixels": pixels,  # [n_rays, 3] or [h, w, 3]
            "rays": rays,  # [n_rays,] or [h, w]
            "color_bkgd": color_bkgd,  # [3,]
            **{k: v for k, v in data.items() if k not in ["rgba", "rays"]},
        }

    def update_num_rays(self, num_rays):
        self.num_rays = num_rays

    def fetch_data(self, index):
        """Fetch the data (it maybe cached for multiple batches)."""
        num_rays = self.num_rays

        if self.training:
            if self.batch_over_images:
                image_id = torch.randint(
                    0,
                    len(self.images),
                    size=(num_rays,),
                    device=self.images.device,
                )
            else:
                image_id = [index]
            x = torch.randint(
                0, self.WIDTH, size=(num_rays,), device=self.images.device
            )
            y = torch.randint(
                0, self.HEIGHT, size=(num_rays,), device=self.images.device
            )
        else:
            image_id = [index]
            x, y = torch.meshgrid(
                torch.arange(self.WIDTH, device=self.images.device),
                torch.arange(self.HEIGHT, device=self.images.device),
                indexing="xy",
            )
            x = x.flatten()
            y = y.flatten()

        # generate rays
        rgba = self.images[image_id, y, x] / 255.0  # (num_rays, 4)
        c2w = self.camtoworlds[image_id]  # (num_rays, 4, 4)
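        # Unproject pixel centers (+0.5 offset) through the intrinsics into
        # camera-frame directions with z = 1; y and z are flipped below to match
        # the OpenGL/Blender camera convention (camera looks down -z).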
        camera_dirs = F.pad(
            torch.stack(
                [
                    (x - self.K[0, 2] + 0.5) / self.K[0, 0],
                    (y - self.K[1, 2] + 0.5) / self.K[1, 1],
                ],
                dim=-1,
            ),
            (0, 1),
            value=1,
        )  # [num_rays, 3]
        camera_dirs[..., [1, 2]] *= -1  # opengl format

        # [num_rays, 3]: rotate the camera-frame directions into the world frame
        directions = (camera_dirs[:, None, :] * c2w[:, :3, :3]).sum(dim=-1)
        origins = torch.broadcast_to(c2w[:, :3, -1], directions.shape)
        viewdirs = directions / torch.linalg.norm(directions, dim=-1, keepdims=True)

        if self.training:
            origins = torch.reshape(origins, (num_rays, 3))
            viewdirs = torch.reshape(viewdirs, (num_rays, 3))
            rgba = torch.reshape(rgba, (num_rays, 4))
        else:
            origins = torch.reshape(origins, (self.HEIGHT, self.WIDTH, 3))
            viewdirs = torch.reshape(viewdirs, (self.HEIGHT, self.WIDTH, 3))
            rgba = torch.reshape(rgba, (self.HEIGHT, self.WIDTH, 4))

        rays = Rays(origins=origins, viewdirs=viewdirs)

        return {
            "rgba": rgba,  # [h, w, 4] or [num_rays, 4]
            "rays": rays,  # [h, w, 3] or [num_rays, 3]
        }
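

# Example usage -- a minimal sketch, not part of the loader. The data path below
# is an assumption; point `root_fp` at your local copy of the NeRF synthetic
# scenes (relative paths are resolved two directories above this file, see
# `_load_renderings`).
if __name__ == "__main__":
    dataset = SubjectLoader(
        subject_id="lego",
        root_fp="./data/nerf_synthetic/",
        split="train",
        num_rays=1024,
    )
    sample = dataset[0]
    print(sample["pixels"].shape)        # torch.Size([1024, 3])
    print(sample["rays"].origins.shape)  # torch.Size([1024, 3])
    print(sample["color_bkgd"])          # white background by default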