# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import unittest

import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.renderer.blending import (
    BlendParams,
    hard_rgb_blend,
    sigmoid_alpha_blend,
    softmax_rgb_blend,
)
from pytorch3d.renderer.mesh.rasterizer import Fragments


def sigmoid_blend_naive_loop(colors, fragments, blend_params):
    """
    Naive for loop based implementation of distance based alpha calculation.
    Only for test purposes.
    """
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    sigma = blend_params.sigma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0

                # Loop over k faces and calculate 2D distance based probability
                # map.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        alpha *= 1.0 - prob  # cumulative product
                pixel_colors[n, h, w, :3] = colors[n, h, w, 0, :]
                pixel_colors[n, h, w, 3] = 1.0 - alpha

    return pixel_colors


def sigmoid_alpha_blend_vectorized(colors, fragments, blend_params) -> torch.Tensor:
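    """
    Vectorized implementation of the distance based alpha calculation.
    Only for test purposes; should match sigmoid_blend_naive_loop.
    """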
    N, H, W, K = fragments.pix_to_face.shape
    pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)
    mask = fragments.pix_to_face >= 0
    prob = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask
    pixel_colors[..., :3] = colors[..., 0, :]
    pixel_colors[..., 3] = 1.0 - torch.prod((1.0 - prob), dim=-1)
    return pixel_colors


def sigmoid_blend_naive_loop_backward(grad_images, images, fragments, blend_params):
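    """
    Naive for loop based implementation of the gradient of the alpha channel
    with respect to fragments.dists. Only for test purposes.
    """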
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    sigma = blend_params.sigma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    grad_distances = torch.zeros((N, H, W, K), dtype=dists.dtype, device=device)

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0 - images[n, h, w, 3]
                grad_alpha = grad_images[n, h, w, 3]
                # Loop over k faces and calculate 2D distance based probability
                # map.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        grad_distances[n, h, w, k] = (
                            grad_alpha * (-1.0 / sigma) * prob * alpha
                        )
    return grad_distances


def softmax_blend_naive(colors, fragments, blend_params):
    """
    Naive for loop based implementation of softmax blending.
    Only for test purposes.
    """
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    zbuf = fragments.zbuf
    sigma = blend_params.sigma
    gamma = blend_params.gamma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)

    # Near and far clipping planes
    zfar = 100.0
    znear = 1.0

    bk_color = blend_params.background_color
    if not torch.is_tensor(bk_color):
        bk_color = torch.tensor(bk_color, dtype=colors.dtype, device=device)

    # Background color component
    delta = np.exp(1e-10 / gamma) * 1e-10
    delta = torch.tensor(delta).to(device=device)

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0
                weights_k = torch.zeros(K, device=device)
                zmax = 0.0

                # Loop over K to find max z.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
                        if zinv > zmax:
                            zmax = zinv

                # Loop over K faces to calculate 2D distance based probability
                # map and zbuf based weights for colors.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        alpha *= 1.0 - prob  # cumulative product
                        weights_k[k] = prob * torch.exp((zinv - zmax) / gamma)

                denom = weights_k.sum() + delta
                weights = weights_k / denom
                cols = (weights[..., None] * colors[n, h, w, :, :]).sum(dim=0)
                pixel_colors[n, h, w, :3] = cols
                pixel_colors[n, h, w, :3] += (delta / denom) * bk_color
                pixel_colors[n, h, w, 3] = 1.0 - alpha

    return pixel_colors


class TestBlending(TestCaseMixin, unittest.TestCase):
    def setUp(self) -> None:
        torch.manual_seed(42)

    def _compare_impls(
        self, fn1, fn2, args1, args2, grad_var1=None, grad_var2=None, compare_grads=True
    ):
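        """
        Run fn1(*args1) and fn2(*args2), check that the alpha channels of the
        outputs match and, if compare_grads is True, that the gradients
        accumulated on grad_var1 and grad_var2 match.
        """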
        out1 = fn1(*args1)
        out2 = fn2(*args2)
        self.assertClose(out1.cpu()[..., 3], out2.cpu()[..., 3], atol=1e-7)

        # Check gradients
        if not compare_grads:
            return

        grad_out = torch.randn_like(out1)
        (out1 * grad_out).sum().backward()
        self.assertTrue(hasattr(grad_var1, "grad"))

        (out2 * grad_out).sum().backward()
        self.assertTrue(hasattr(grad_var2, "grad"))
        self.assertClose(grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5)

    def test_hard_rgb_blend(self):
        N, H, W, K = 5, 10, 10, 20
        pix_to_face = torch.randint(low=-1, high=100, size=(N, H, W, K))
        bary_coords = torch.ones((N, H, W, K, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,
            zbuf=pix_to_face,  # dummy
            dists=pix_to_face,  # dummy
        )
        colors = torch.randn((N, H, W, K, 3))
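        # BlendParams positional args are (sigma, gamma, background_color).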
        blend_params = BlendParams(1e-4, 1e-4, (0.5, 0.5, 1))
        images = hard_rgb_blend(colors, fragments, blend_params)

        # Examine if the foreground colors are correct.
        is_foreground = pix_to_face[..., 0] >= 0
        self.assertClose(images[is_foreground][:, :3], colors[is_foreground][..., 0, :])

        # Examine if the background colors are correct.
        for i in range(3):  # i.e. RGB
            channel_color = blend_params.background_color[i]
            self.assertTrue(images[~is_foreground][..., i].eq(channel_color).all())

        # Examine the alpha channel is correct
        self.assertTrue(images[..., 3].eq(1).all())

    def test_sigmoid_alpha_blend_manual_gradients(self):
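        """
        Check that the manual gradient computation in
        sigmoid_blend_naive_loop_backward matches autograd on the naive loop.
        """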
        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 3, 2
        device = torch.device("cuda")
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance:
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists,
        )
        blend_params = BlendParams(sigma=1e-3)
        pix_cols = sigmoid_blend_naive_loop(colors, fragments, blend_params)
        grad_out = torch.randn_like(pix_cols)

        # Backward pass
        pix_cols.backward(grad_out)
        grad_dists = sigmoid_blend_naive_loop_backward(
            grad_out, pix_cols, fragments, blend_params
        )
        self.assertTrue(torch.allclose(dists.grad, grad_dists, atol=1e-7))

    def test_sigmoid_alpha_blend_python(self):
        """
Nikhila Ravi's avatar
Nikhila Ravi committed
226
        Test outputs of python tensorised function and python loop
facebook-github-bot's avatar
facebook-github-bot committed
227
228
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 1, 4, 1
        device = torch.device("cuda")
        pix_to_face = torch.randint(low=-1, high=F, size=(N, S, S, K), device=device)
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K), device=device)
        dists2 = dists1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_alpha_blend_vectorized,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )

    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the center
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        device = torch.device("cuda")
        pix_to_face = torch.full(
            (N, S, S, K), fill_value=-1, dtype=torch.int64, device=device
        )
        h = int(S / 2)
        pix_to_face_full = torch.randint(
            size=(N, h, h, K), low=0, high=100, device=device
        )
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        empty = torch.tensor([], device=device)

        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K), device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn((N, S, S, K, 3), device=device)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-3)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)
        self._compare_impls(
            softmax_rgb_blend,
            softmax_blend_naive,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )

    @staticmethod
    def bm_sigmoid_alpha_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device="cuda",
        backend: str = "pytorch",
    ):
        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(
            low=-1, high=F + 1, size=(N, S, S, K), device=device
        )
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)
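        # backend="pytorch" benchmarks the vectorized python reference defined
        # above; any other value benchmarks pytorch3d's sigmoid_alpha_blend.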

        blend_fn = (
            sigmoid_alpha_blend_vectorized
            if backend == "pytorch"
            else sigmoid_alpha_blend
        )

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = blend_fn(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn

    @staticmethod
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = "cpu",
        backend: str = "pytorch",
    ):
        if torch.cuda.is_available() and "cuda:" in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(
            low=-1, high=F + 1, size=(N, S, S, K), device=device
        )
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
        zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf,
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn

    def test_blend_params(self):
        """Test color parameter of BlendParams().
Nikhila Ravi's avatar
Nikhila Ravi committed
414
415
            Assert passed value overrides default value.
            """
        bp_default = BlendParams()
        bp_new = BlendParams(background_color=(0.5, 0.5, 0.5))
        self.assertEqual(bp_new.background_color, (0.5, 0.5, 0.5))
        self.assertEqual(bp_default.background_color, (1.0, 1.0, 1.0))