# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn

from mmgen.ops.stylegan3.ops import bias_act, upfirdn2d


class TestStyleGAN3Ops:
    """Smoke tests for the StyleGAN3 custom ops ``upfirdn2d`` and ``bias_act``.

    Each test only checks output shapes for a few representative
    up/down-sampling and bias/gain configurations, on CPU and (when
    available) on CUDA.
    """

    @classmethod
    def setup_class(cls):
        # Shared fixtures: a small NCHW feature map, a per-channel bias
        # and a fixed (non-trainable) 3x3 FIR kernel.
        cls.input = torch.randn((1, 3, 16, 16))
        cls.bias = torch.randn(3)
        cls.kernel = nn.Parameter(torch.randn(3, 3), requires_grad=False)

    def test_s3_ops_cpu(self):
        x, k = self.input, self.kernel

        # Plain filtering shrinks each spatial dim by (kernel_size - 1).
        result = upfirdn2d.upfirdn2d(x, k)
        assert result.shape == (1, 3, 14, 14)

        # Upsample by 2 (with padding=1): spatial size doubles.
        result = upfirdn2d.upfirdn2d(x, k, up=2, down=1, padding=1)
        assert result.shape == (1, 3, 32, 32)

        # Downsample by 2 (with padding=1): spatial size halves.
        result = upfirdn2d.upfirdn2d(x, k, up=1, down=2, padding=1)
        assert result.shape == (1, 3, 8, 8)

        # bias_act is shape-preserving in every configuration below.
        result = bias_act.bias_act(x)
        assert result.shape == (1, 3, 16, 16)

        # test bias
        result = bias_act.bias_act(x, self.bias)
        assert result.shape == (1, 3, 16, 16)

        # test gain
        result = bias_act.bias_act(x, gain=0.5)
        assert result.shape == (1, 3, 16, 16)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_s3_ops_cuda(self):
        # Same checks as the CPU test, with all tensors moved to the GPU.
        x = self.input.cuda()
        k = self.kernel.cuda()

        result = upfirdn2d.upfirdn2d(x, k)
        assert result.shape == (1, 3, 14, 14)

        result = upfirdn2d.upfirdn2d(x, k, up=2, down=1, padding=1)
        assert result.shape == (1, 3, 32, 32)

        result = upfirdn2d.upfirdn2d(x, k, up=1, down=2, padding=1)
        assert result.shape == (1, 3, 8, 8)

        result = bias_act.bias_act(x)
        assert result.shape == (1, 3, 16, 16)

        # test bias
        result = bias_act.bias_act(x, self.bias.cuda())
        assert result.shape == (1, 3, 16, 16)

        # test gain
        result = bias_act.bias_act(x, gain=0.5)
        assert result.shape == (1, 3, 16, 16)