# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE, IS_NPU_AVAILABLE

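# Prefer parrots' gradcheck when running under parrots; otherwise fall back
# to torch.autograd, which also provides gradgradcheck.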
_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck, gradgradcheck
    _USING_PARROTS = False


class TestFusedBiasLeakyReLU:
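    """Gradient checks for mmcv.ops.FusedBiasLeakyReLU on CUDA and NPU."""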

    @classmethod
    def setup_class(cls):
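        # Without a supported device there is nothing to set up; the skipif
        # marks on the tests below keep them from running in that case.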
        if not IS_CUDA_AVAILABLE and not IS_NPU_AVAILABLE:
            return
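        # Shared fixtures: a small random input and a zero bias tensor on
        # whichever device is available.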
        if IS_CUDA_AVAILABLE:
            cls.input_tensor = torch.randn((2, 2, 2, 2),
                                           requires_grad=True).cuda()
            cls.bias = torch.zeros(2, requires_grad=True).cuda()
        elif IS_NPU_AVAILABLE:
            cls.input_tensor = torch.randn((2, 2, 2, 2),
                                           requires_grad=True).npu()
            cls.bias = torch.zeros(2, requires_grad=True).npu()

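    # Parametrize over every supported backend; devices that are unavailable
    # in the current environment are skipped by the marks.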
    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'npu',
            marks=pytest.mark.skipif(
                not IS_NPU_AVAILABLE, reason='requires NPU support'))
    ])
    def test_gradient(self, device):
        from mmcv.ops import FusedBiasLeakyReLU
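        # gradcheck compares the op's analytic gradient against
        # finite-difference estimates on the shared input tensor.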
        if _USING_PARROTS:
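            # Under parrots only the CUDA path is exercised; there is no
            # parrots branch for NPU here.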
            if IS_CUDA_AVAILABLE:
                gradcheck(
                    FusedBiasLeakyReLU(2).cuda(),
                    self.input_tensor,
                    delta=1e-4,
                    pt_atol=1e-3)
        else:
            gradcheck(
                FusedBiasLeakyReLU(2).to(device),
                self.input_tensor,
                eps=1e-4,
                atol=1e-3)

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'npu',
            marks=pytest.mark.skipif(
                not IS_NPU_AVAILABLE, reason='requires NPU support'))
    ])
    def test_gradgradient(self, device):
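        # Second-order check: gradgradcheck verifies the gradients of the
        # gradients via finite differences.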
        from mmcv.ops import FusedBiasLeakyReLU
        if _USING_PARROTS:
            # Only the torch.autograd fallback imports gradgradcheck above;
            # parrots exposes gradcheck alone in this module, so skip rather
            # than hit a NameError. (Assumes parrots has no equivalent.)
            pytest.skip('gradgradcheck is unavailable under parrots')
        gradgradcheck(
            FusedBiasLeakyReLU(2).to(device),
            self.input_tensor,
            eps=1e-4,
            atol=1e-3)