#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This example demonstrates multiview 3D reconstruction using the plain
pulsar interface. For this, reference images have been pre-generated
(you can find them at
`../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png`).
The camera parameters are assumed given. The scene is initialized with
random spheres. Gradient-based optimization is used to optimize sphere
parameters and prune spheres to converge to a 3D representation.

This example is not yet available through the 'unified' interface,
because opacity support for general data structures has not landed in
PyTorch3D.
"""
import logging
import math
from os import path

import cv2
import imageio
import numpy as np
import torch
from pytorch3d.renderer.points.pulsar import Renderer
from torch import nn, optim


LOGGER = logging.getLogger(__name__)
N_POINTS = 400_000
WIDTH = 1_000
HEIGHT = 1_000
VISUALIZE_IDS = [0, 1]
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model. Optionally, camera parameters can
    be provided to the forward method in which case the scene is rendered
    using those parameters.
    """

    def __init__(self):
        super(SceneModel, self).__init__()
        self.gamma = 1.0
        # Points.
        torch.manual_seed(1)
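        # Initialize the spheres uniformly at random in a 10 x 10 x 10 cube in
        # front of the cameras (x, y in [-5, 5], z in [25, 35]).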
        vert_pos = torch.rand((1, N_POINTS, 3), dtype=torch.float32) * 10.0
        vert_pos[:, :, 2] += 25.0
        vert_pos[:, :, :2] -= 5.0
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.ones(1, N_POINTS, 3, dtype=torch.float32) * 0.5,
                requires_grad=True,
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32) * 0.05, requires_grad=True
            ),
        )
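        # Per-sphere opacities (registered as a parameter, but not passed to
        # the renderer in this example's forward call).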
        self.register_parameter(
            "vert_opy",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32), requires_grad=True
            ),
        )
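        # Eight fixed cameras on a circle of radius 35 around (0, 0, 30), each
        # rotated about the y axis to look at the scene. Pulsar camera
        # parameters are packed as [position (3), axis-angle rotation (3),
        # focal length, sensor width].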
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [
                    [
                        np.sin(angle) * 35.0,
                        0.0,
                        30.0 - np.cos(angle) * 35.0,
                        0.0,
                        -angle + math.pi,
                        0.0,
                        5.0,
                        2.0,
                    ]
                    for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
                ],
                dtype=torch.float32,
            ),
        )
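        # A single renderer instance handles up to N_POINTS spheres at a
        # WIDTH x HEIGHT resolution; right_handed_system=True renders in a
        # right-handed coordinate system.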
        self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

    def forward(self, cam=None):
        if cam is None:
            cam = self.cam_params
            n_views = 8
        else:
            n_views = 1
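        # gamma controls the softness of the sphere blending; 45.0 is the
        # maximum depth up to which spheres are rendered.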
        return self.renderer.forward(
            self.vert_pos.expand(n_views, -1, -1),
            self.vert_col.expand(n_views, -1, -1),
            self.vert_rad.expand(n_views, -1),
            cam,
            self.gamma,
            45.0,
        )
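
# A rough usage sketch for SceneModel (not executed here):
#
#     model = SceneModel().to(DEVICE)
#     all_views = model()                         # render all eight training views
#     one_view = model(cam=model.cam_params[:1])  # render from one explicit camera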


def cli():
    """
    Simple demonstration for a multi-view 3D reconstruction using pulsar.

    This example makes use of opacity, which is not yet supported through
    the unified PyTorch3D interface.

    Writes to `multiview.gif`.
    """
    LOGGER.info("Loading reference...")
    # Load reference.
    ref = torch.stack(
        [
            torch.from_numpy(
                imageio.imread(
                    "../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png"
                    % idx
                )
            ).to(torch.float32)
            / 255.0
            for idx in range(8)
        ]
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer with a separate learning rate per parameter group.
    optimizer = optim.SGD(
        [
            {"params": [model.vert_col], "lr": 1e-1},
            {"params": [model.vert_rad], "lr": 1e-3},
            {"params": [model.vert_pos], "lr": 1e-3},
        ]
    )

    # For visualization.
    angle = 0.0
    LOGGER.info("Writing video to `%s`.", path.abspath("multiview.avi"))
    writer = imageio.get_writer("multiview.gif", format="gif", fps=25)

    # Optimize.
    for i in range(300):
        optimizer.zero_grad()
        result = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[0, :, :, ::-1])
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                0, :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
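        # Per-pixel squared error, summed over all eight reference views.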
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
        # Cleanup: keep the parameters in a valid range and prune spheres
        # that no longer contribute to the reconstruction.
        with torch.no_grad():
            model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
            # Prune spheres whose radius has collapsed: move them far out of
            # the scene and reset the radius to a small value.
            model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
            model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
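            # Spheres whose color has converged to (almost) white are treated
            # as background and moved out of the scene as well.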
            vd = (
                (model.vert_col - torch.ones(1, 1, 3, dtype=torch.float32).to(DEVICE))
                .abs()
                .sum(dim=2)
            )
            model.vert_pos.data[vd <= 0.2] = -1000.0
        # Rotating visualization.
        cam_control = torch.tensor(
            [
                [
                    np.sin(angle) * 35.0,
                    0.0,
                    30.0 - np.cos(angle) * 35.0,
                    0.0,
                    -angle + math.pi,
                    0.0,
                    5.0,
                    2.0,
                ]
            ],
            dtype=torch.float32,
        ).to(DEVICE)
        with torch.no_grad():
            result = model.forward(cam=cam_control)[0]
            result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
            cv2.imshow("vis", result_im[:, :, ::-1])
            writer.append_data(result_im)
            angle += 0.05
    writer.close()
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()