# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import numpy as np
import unittest
import torch

from pytorch3d.renderer.blending import (
    BlendParams,
    hard_rgb_blend,
    sigmoid_alpha_blend,
    softmax_rgb_blend,
)
from pytorch3d.renderer.mesh.rasterizer import Fragments


def sigmoid_blend_naive_loop(colors, fragments, blend_params):
    """
    Naive for loop based implementation of distance based alpha calculation.
    Only for test purposes.
    """
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    sigma = blend_params.sigma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0

                # Loop over k faces and calculate 2D distance based probability
                # map.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        alpha *= 1.0 - prob  # cumulative product
                pixel_colors[n, h, w, :3] = colors[n, h, w, 0, :]
                pixel_colors[n, h, w, 3] = 1.0 - alpha

    return pixel_colors


def sigmoid_blend_naive_loop_backward(
    grad_images, images, fragments, blend_params
):
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    sigma = blend_params.sigma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    grad_distances = torch.zeros((N, H, W, K), dtype=dists.dtype, device=device)
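    # Analytic gradient of the forward alpha: for each face k,
    #   d(alpha) / d(dist_k) = (-1 / sigma) * prob_k * prod_j (1 - prob_j)
    # where prod_j (1 - prob_j) = 1 - alpha is read back from the forward images.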

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0 - images[n, h, w, 3]
                grad_alpha = grad_images[n, h, w, 3]
                # Loop over k faces and calculate 2D distance based probability
                # map.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        grad_distances[n, h, w, k] = (
                            grad_alpha * (-1.0 / sigma) * prob * alpha
                        )
    return grad_distances


def softmax_blend_naive(colors, fragments, blend_params):
    """
    Naive for loop based implementation of softmax blending.
    Only for test purposes.
    """
    pix_to_face = fragments.pix_to_face
    dists = fragments.dists
    zbuf = fragments.zbuf
    sigma = blend_params.sigma
    gamma = blend_params.gamma

    N, H, W, K = pix_to_face.shape
    device = pix_to_face.device
    pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=device)

    # Near and far clipping planes
    zfar = 100.0
    znear = 1.0

    bk_color = blend_params.background_color
    if not torch.is_tensor(bk_color):
        bk_color = torch.tensor(bk_color, dtype=colors.dtype, device=device)

    # Background color component
    delta = np.exp(1e-10 / gamma) * 1e-10
    delta = torch.tensor(delta).to(device=device)

    for n in range(N):
        for h in range(H):
            for w in range(W):
                alpha = 1.0
                weights_k = torch.zeros(K, device=device)
                zmax = 0.0

                # Loop over K to find max z.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
                        if zinv > zmax:
                            zmax = zinv

                # Loop over K faces to calculate 2D distance based probability
                # map and zbuf based weights for colors.
                for k in range(K):
                    if pix_to_face[n, h, w, k] >= 0:
                        zinv = (zfar - zbuf[n, h, w, k]) / (zfar - znear)
                        prob = torch.sigmoid(-dists[n, h, w, k] / sigma)
                        alpha *= 1.0 - prob  # cumulative product
                        weights_k[k] = prob * torch.exp((zinv - zmax) / gamma)

                denom = weights_k.sum() + delta
                weights = weights_k / denom
                cols = (weights[..., None] * colors[n, h, w, :, :]).sum(dim=0)
                pixel_colors[n, h, w, :3] = cols
                pixel_colors[n, h, w, :3] += (delta / denom) * bk_color
                pixel_colors[n, h, w, 3] = 1.0 - alpha

    return pixel_colors


class TestBlending(unittest.TestCase):
    def setUp(self) -> None:
        torch.manual_seed(42)

    def _compare_impls(
        self,
        fn1,
        fn2,
        args1,
        args2,
        grad_var1=None,
        grad_var2=None,
        compare_grads=True,
    ):
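        # Run both implementations on equivalent inputs and check that the
        # outputs (and optionally the gradients of the given variables) match.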

        out1 = fn1(*args1)
        out2 = fn2(*args2)
        self.assertTrue(torch.allclose(out1.cpu(), out2.cpu(), atol=1e-7))

        # Check gradients
        if not compare_grads:
            return

        grad_out = torch.randn_like(out1)
        (out1 * grad_out).sum().backward()
        self.assertTrue(hasattr(grad_var1, "grad"))

        (out2 * grad_out).sum().backward()
        self.assertTrue(hasattr(grad_var2, "grad"))
        self.assertTrue(
            torch.allclose(
                grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5
            )
        )

    def test_hard_rgb_blend(self):
        N, H, W, K = 5, 10, 10, 20
        pix_to_face = torch.ones((N, H, W, K))
        bary_coords = torch.ones((N, H, W, K, 3))
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=bary_coords,
            zbuf=pix_to_face,  # dummy
            dists=pix_to_face,  # dummy
        )
        colors = bary_coords.clone()
        top_k = torch.randn((K, 3))
        colors[..., :, :] = top_k
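        # Every pixel has the same per-face colors, so hard blending should
        # return the color of the closest face (k=0) with alpha = 1.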
        images = hard_rgb_blend(colors, fragments)
        expected_vals = torch.ones((N, H, W, 4))
        pix_cols = torch.ones_like(expected_vals[..., :3]) * top_k[0, :]
        expected_vals[..., :3] = pix_cols
        self.assertTrue(torch.allclose(images, expected_vals))

    def test_sigmoid_alpha_blend_manual_gradients(self):
        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 3, 2
        device = torch.device("cuda")
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists = torch.randn(
            size=(N, S, S, K), requires_grad=True, device=device
        )
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists,
        )
        blend_params = BlendParams(sigma=1e-3)
        pix_cols = sigmoid_blend_naive_loop(colors, fragments, blend_params)
        grad_out = torch.randn_like(pix_cols)

        # Backward pass
        pix_cols.backward(grad_out)
        grad_dists = sigmoid_blend_naive_loop_backward(
            grad_out, pix_cols, fragments, blend_params
        )
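        # The autograd gradient of the naive forward loop should match the
        # manually derived gradient from the naive backward loop.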
        self.assertTrue(torch.allclose(dists.grad, grad_dists, atol=1e-7))

    def test_sigmoid_alpha_blend_python(self):
        """
        Test that the python tensorised function and the naive python loop
        implementation produce the same output.
        """

        # Create dummy outputs of rasterization
        torch.manual_seed(231)
        F = 32  # number of faces in the mesh
        # The python loop version is really slow so only using small input sizes.
        N, S, K = 2, 10, 5
        device = torch.device("cuda")
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K))
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(
            size=(N, S, S, K), requires_grad=True, device=device
        )
        dists2 = dists1.detach().clone()
        dists2.requires_grad = True

        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-2)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)

        self._compare_impls(
            sigmoid_alpha_blend,
            sigmoid_blend_naive_loop,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )

    def test_softmax_rgb_blend(self):
        # Create dummy outputs of rasterization simulating a cube in the center
        # of the image with surrounding padded values.
        N, S, K = 1, 8, 2
        device = torch.device("cuda")
        pix_to_face = -torch.ones(
            (N, S, S, K), dtype=torch.int64, device=device
        )
        h = int(S / 2)
        pix_to_face_full = torch.randint(
            size=(N, h, h, K), low=0, high=100, device=device
        )
        s = int(S / 4)
        e = int(0.75 * S)
        pix_to_face[:, s:e, s:e, :] = pix_to_face_full
        empty = torch.tensor([], device=device)

        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        zbuf1 = torch.randn(size=(N, S, S, K), device=device)

        # randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        dists1 = (
            torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
        )
        dists2 = dists1.clone()
        zbuf2 = zbuf1.clone()
        dists1.requires_grad = True
        dists2.requires_grad = True
        colors = torch.randn((N, S, S, K, 3), device=device)
        fragments1 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf1,
            dists=dists1,
        )
        fragments2 = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf2,
            dists=dists2,
        )

        blend_params = BlendParams(sigma=1e-3)
        args1 = (colors, fragments1, blend_params)
        args2 = (colors, fragments2, blend_params)
        self._compare_impls(
            softmax_rgb_blend,
            softmax_blend_naive,
            args1,
            args2,
            dists1,
            dists2,
            compare_grads=True,
        )

    @staticmethod
    def bm_sigmoid_alpha_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = "cpu",
    ):
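        # Benchmark helper: builds dummy rasterizer outputs and returns a
        # closure that runs one forward + backward pass of sigmoid blending.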
        if torch.cuda.is_available() and "cuda:" in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(
            size=(N, S, S, K), requires_grad=True, device=device
        )
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=empty,  # dummy
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)
        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = sigmoid_alpha_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn

    @staticmethod
    def bm_softmax_blending(
        num_meshes: int = 16,
        image_size: int = 128,
        faces_per_pixel: int = 100,
        device: str = "cpu",
    ):
        if torch.cuda.is_available() and "cuda:" in device:
            # If a device other than the default is used, set the device explicitly.
            torch.cuda.set_device(device)

        device = torch.device(device)
        torch.manual_seed(231)

        # Create dummy outputs of rasterization
        N, S, K = num_meshes, image_size, faces_per_pixel
        F = 32  # num faces in the mesh
        pix_to_face = torch.randint(F + 1, size=(N, S, S, K), device=device) - 1
        colors = torch.randn((N, S, S, K, 3), device=device)
        empty = torch.tensor([], device=device)

        # Randomly flip the sign of the distance
        # (-) means inside triangle, (+) means outside triangle.
        random_sign_flip = torch.rand((N, S, S, K), device=device)
        random_sign_flip[random_sign_flip > 0.5] *= -1.0
        dists1 = torch.randn(
            size=(N, S, S, K), requires_grad=True, device=device
        )
        zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=empty,  # dummy
            zbuf=zbuf,
            dists=dists1,
        )
        blend_params = BlendParams(sigma=1e-3)

        torch.cuda.synchronize()

        def fn():
            # test forward and backward pass
            images = softmax_rgb_blend(colors, fragments, blend_params)
            images.sum().backward()
            torch.cuda.synchronize()

        return fn

    def test_blend_params(self):
        """Test color parameter of BlendParams().
Nikhila Ravi's avatar
Nikhila Ravi committed
425
426
            Assert passed value overrides default value.
            """
        bp_default = BlendParams()
        bp_new = BlendParams(background_color=(0.5, 0.5, 0.5))
        self.assertEqual(bp_new.background_color, (0.5, 0.5, 0.5))
        self.assertEqual(bp_default.background_color, (1.0, 1.0, 1.0))