test_render_meshes.py 13.9 KB
Newer Older
facebook-github-bot's avatar
facebook-github-bot committed
1
2
3
4
5
6
7
8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


"""
Sanity checks for output images from the renderer.
"""
import unittest
from pathlib import Path
9
10

import numpy as np
facebook-github-bot's avatar
facebook-github-bot committed
11
import torch
Nikhila Ravi's avatar
Nikhila Ravi committed
12
from common_testing import TestCaseMixin, load_rgb_image
facebook-github-bot's avatar
facebook-github-bot committed
13
from PIL import Image
14
from pytorch3d.io import load_objs_as_meshes
15
from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
facebook-github-bot's avatar
facebook-github-bot committed
16
17
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.materials import Materials
18
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
facebook-github-bot's avatar
facebook-github-bot committed
19
20
21
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import (
    BlendParams,
22
    HardFlatShader,
Patrick Labatut's avatar
Patrick Labatut committed
23
    HardGouraudShader,
24
25
26
    HardPhongShader,
    SoftSilhouetteShader,
    TexturedSoftPhongShader,
facebook-github-bot's avatar
facebook-github-bot committed
27
28
29
30
31
)
from pytorch3d.renderer.mesh.texturing import Textures
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere

32

Nikhila Ravi's avatar
Nikhila Ravi committed
33
# If DEBUG=True, save out images generated in the tests for debugging.
# All saved images have prefix DEBUG_
DEBUG = False
# Directory holding the pre-rendered reference images the tests compare against.
DATA_DIR = Path(__file__).resolve().parent / "data"


Nikhila Ravi's avatar
Nikhila Ravi committed
39
class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
    """
    Sanity checks for the mesh renderer: rendered output is compared against
    pre-saved reference images in DATA_DIR with a small absolute tolerance.
    All tests require a CUDA device.
    """

    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh with constant white vertex colors.
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_camera"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        # Light in front of the sphere (camera looks down -z onto the scene).
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        # Test several shaders
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(lights=lights, cameras=cameras, materials=materials)
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            filename = "simple_sphere_light_%s%s.png" % (name, postfix)
            image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)

        ########################################################
        # Move the light to the +z axis in world space so it is
        # behind the sphere. Note that +Z is in, +Y up,
        # +X left for both world and camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        phong_shader = HardPhongShader(
            lights=lights, cameras=cameras, materials=materials
        )
        phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader)
        images = phong_renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix, DATA_DIR
        )
        self.assertClose(rgb, image_ref_phong_dark, atol=0.05)

    def test_simple_sphere_elevated_camera(self):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        The rendering is performed with a camera that has non-zero elevation.
        """
        self.test_simple_sphere(elevated_camera=True)

    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 20
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings: identical camera for every batch element so
        # each rendered image can be compared to the same reference image.
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        # BUG FIX: the "phong" entry previously mapped to HardGouraudShader, so
        # the Phong shader was never exercised in the batched test even though
        # the output was compared against the phong reference image. Keep this
        # mapping consistent with test_simple_sphere above.
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(lights=lights, cameras=cameras, materials=materials)
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s.png" % name, DATA_DIR
            )
            # Every element of the batch must match the single reference image.
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                self.assertClose(rgb, image_ref, atol=0.05)

    def test_silhouette_with_grad(self):
        """
        Test silhouette blending. Also check that gradient calculation works.
        """
        device = torch.device("cuda:0")
        ref_filename = "test_silhouette.png"
        image_ref_filename = DATA_DIR / ref_filename
        sphere_mesh = ico_sphere(5, device)
        verts, faces = sphere_mesh.get_mesh_verts_faces(0)
        sphere_mesh = Meshes(verts=[verts], faces=[faces])

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            # Blur radius derived from sigma so the soft silhouette has
            # non-zero support; many faces per pixel are needed for blending.
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=80,
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=SoftSilhouetteShader(blend_params=blend_params),
        )
        images = renderer(sphere_mesh)
        # Only the alpha channel is meaningful for the silhouette shader.
        alpha = images[0, ..., 3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_silhouette.png"
            )

        with Image.open(image_ref_filename) as raw_image_ref:
            image_ref = torch.from_numpy(np.array(raw_image_ref))
        image_ref = image_ref.to(dtype=torch.float32) / 255.0
        self.assertClose(alpha, image_ref, atol=0.055)

        # Check grad exist
        verts.requires_grad = True
        sphere_mesh = Meshes(verts=[verts], faces=[faces])
        images = renderer(sphere_mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png"
                )

            self.assertClose(rgb, image_ref, atol=0.05)