test_render_meshes.py 20.2 KB
Newer Older
facebook-github-bot's avatar
facebook-github-bot committed
1
2
3
4
5
6
7
8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


"""
Sanity checks for output images from the renderer.
"""
import unittest
from pathlib import Path
9
10

import numpy as np
facebook-github-bot's avatar
facebook-github-bot committed
11
import torch
Nikhila Ravi's avatar
Nikhila Ravi committed
12
from common_testing import TestCaseMixin, load_rgb_image
facebook-github-bot's avatar
facebook-github-bot committed
13
from PIL import Image
Nikhila Ravi's avatar
Nikhila Ravi committed
14
from pytorch3d.io import load_obj
15
from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
facebook-github-bot's avatar
facebook-github-bot committed
16
17
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.materials import Materials
Nikhila Ravi's avatar
Nikhila Ravi committed
18
from pytorch3d.renderer.mesh import TexturesAtlas, TexturesUV, TexturesVertex
19
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
facebook-github-bot's avatar
facebook-github-bot committed
20
21
22
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import (
    BlendParams,
23
    HardFlatShader,
Patrick Labatut's avatar
Patrick Labatut committed
24
    HardGouraudShader,
25
26
27
    HardPhongShader,
    SoftSilhouetteShader,
    TexturedSoftPhongShader,
facebook-github-bot's avatar
facebook-github-bot committed
28
)
29
from pytorch3d.structures.meshes import Meshes, join_mesh
facebook-github-bot's avatar
facebook-github-bot committed
30
31
from pytorch3d.utils.ico_sphere import ico_sphere

32

Nikhila Ravi's avatar
Nikhila Ravi committed
33
# If DEBUG=True, save out images generated in the tests for debugging.
# All saved images have prefix DEBUG_
DEBUG = False
# Directory holding the reference images the tests compare against
# (sibling "data" directory next to this test file).
DATA_DIR = Path(__file__).resolve().parent / "data"


Nikhila Ravi's avatar
Nikhila Ravi committed
39
class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
facebook-github-bot's avatar
facebook-github-bot committed
40
41
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh: level-5 ico sphere with constant (all-ones) vertex features
        # so shading differences come only from lights/materials.
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            # `postfix` selects which reference image file is loaded below.
            postfix = "_elevated_camera"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        # Light in front of the sphere (world-space z = +2).
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Test several shaders: each name maps to a shader class that is
        # instantiated with identical lights/cameras/materials.
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            filename = "simple_sphere_light_%s%s.png" % (name, postfix)
            image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
            # Drop the alpha channel; comparison is on RGB only.
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            # Loose tolerance to absorb minor rasterization differences.
            self.assertClose(rgb, image_ref, atol=0.05)

        ########################################################
        # Move the light to the +z axis in world space so it is
        # behind the sphere. Note that +Z is in, +Y up,
        # +X left for both world and camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        phong_shader = HardPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader)
        # Pass lights explicitly to exercise the per-call override path.
        images = phong_renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix, DATA_DIR
        )
        self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
facebook-github-bot's avatar
facebook-github-bot committed
135
136
137

    def test_simple_sphere_elevated_camera(self):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        The rendering is performed with a camera that has non-zero elevation.
        """
        # Delegate to the main sphere test with the elevated-camera variant
        # enabled; that method selects the "_elevated_camera" reference images.
        self.test_simple_sphere(elevated_camera=True)

    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings: identical camera for every batch element,
        # so each rendered image should match the single-sphere reference.
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            # The non-batched reference image is reused for every element.
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s.png" % name, DATA_DIR
            )
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                # Only the first batch element is dumped when debugging.
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s.png" % name
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
                self.assertClose(rgb, image_ref, atol=0.05)
facebook-github-bot's avatar
facebook-github-bot committed
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222

    def test_silhouette_with_grad(self):
        """
        Test silhouette blending. Also check that gradient calculation works.
        """
        device = torch.device("cuda:0")
        ref_filename = "test_silhouette.png"
        image_ref_filename = DATA_DIR / ref_filename
        sphere_mesh = ico_sphere(5, device)
        verts, faces = sphere_mesh.get_mesh_verts_faces(0)
        sphere_mesh = Meshes(verts=[verts], faces=[faces])

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            # Blur radius derived from sigma so soft blending covers the edge.
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            # Many faces per pixel are needed for silhouette blending.
            faces_per_pixel=80,
            clip_barycentric_coords=True,
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=SoftSilhouetteShader(blend_params=blend_params),
        )
        images = renderer(sphere_mesh)
        # Only the alpha channel is meaningful for the silhouette shader.
        alpha = images[0, ..., 3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_silhouette.png"
            )

        with Image.open(image_ref_filename) as raw_image_ref:
            image_ref = torch.from_numpy(np.array(raw_image_ref))

        # Reference is stored as uint8; normalize to [0, 1] for comparison.
        image_ref = image_ref.to(dtype=torch.float32) / 255.0
        self.assertClose(alpha, image_ref, atol=0.055)

        # Check grad exist
        verts.requires_grad = True
        sphere_mesh = Meshes(verts=[verts], faces=[faces])
        images = renderer(sphere_mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        # The cow OBJ lives in the tutorials data directory of the repo.
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(
            obj_filename, device=device, load_textures=True, texture_wrap=None
        )
        tex_map = list(aux.texture_images.values())[0]
        # Add a batch dimension and match the device of the face indices.
        tex_map = tex_map[None, ...].to(faces.textures_idx.device)
        textures = TexturesUV(
            maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
        )
        mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        # Rotate the camera 180 degrees in azimuth to face the front.
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            # Cameras and lights are overridden per call here.
            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            # Blur radius derived from sigma, as in the silhouette test.
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            clip_barycentric_coords=True,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png"
                )

            self.assertClose(rgb, image_ref, atol=0.05)
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425

    def test_joined_spheres(self):
        """
        Test a list of Meshes can be joined as a single mesh and
        the single mesh is rendered correctly with Phong, Gouraud
        and Flat Shaders.
        """
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        # Initialize a list containing two ico spheres of different sizes.
        sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
        # [(42 verts, 80 faces), (162 verts, 320 faces)]
        # The scale the vertices need to be set at to resize the spheres
        scales = [0.25, 1]
        # The distance the spheres ought to be offset horizontally to prevent overlap.
        offsets = [1.2, -0.3]
        # Initialize a list containing the adjusted sphere meshes.
        sphere_mesh_list = []
        for i in range(len(sphere_list)):
            verts = sphere_list[i].verts_padded() * scales[i]
            # Shift along x so the two spheres do not overlap in the image.
            verts[0, :, 0] += offsets[i]
            sphere_mesh_list.append(
                Meshes(verts=verts, faces=sphere_list[i].faces_padded())
            )
        joined_sphere_mesh = join_mesh(sphere_mesh_list)
        # Assign constant white vertex textures to the joined mesh.
        joined_sphere_mesh.textures = TexturesVertex(
            verts_features=torch.ones_like(joined_sphere_mesh.verts_padded())
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            image = renderer(joined_sphere_mesh)
            rgb = image[..., :3].squeeze().cpu()
            if DEBUG:
                file_name = "DEBUG_joined_spheres_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / file_name
                )
            image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
Nikhila Ravi's avatar
Nikhila Ravi committed
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524

    def test_texture_map_atlas(self):
        """
        Test a mesh with a texture map as a per face atlas is loaded and rendered correctly.
        """
        device = torch.device("cuda:0")

        # Locate the cow OBJ shipped with the repository tutorials.
        tutorial_data_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        cow_obj_path = tutorial_data_dir / "cow_mesh/cow.obj"

        # Load the mesh, packing its texture as an 8x8 per-face atlas.
        verts, faces, aux = load_obj(
            cow_obj_path,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        cow_mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[aux.texture_atlas]),
        )

        # Camera looking at the back of the cow.
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, cull_backfaces=True
        )

        # Shader settings: disable specular highlights.
        materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
        )

        rendered = renderer(cow_mesh)
        rgb = rendered[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_atlas_8x8_back.png"
            )

        self.assertClose(rgb, image_ref, atol=0.05)