# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE

if IS_MLU_AVAILABLE:
    torch.backends.cnnl.allow_tf32 = False
    torch.backends.mlu.matmul.allow_tf32 = False

class TestMaskedConv2d:
    """Regression test for ``mmcv.ops.MaskedConv2d`` on device backends."""

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'mlu',
            marks=pytest.mark.skipif(
                not IS_MLU_AVAILABLE, reason='requires MLU support'))
    ])
    def test_masked_conv2d_all_close(self, device):
        """Check MaskedConv2d reproduces a saved reference output.

        Loads pre-generated input/mask/weight/bias/output arrays from
        ``tests/data/for_masked_conv2d`` and asserts the op's output on
        ``device`` matches the reference within a relative tolerance of
        1e-3.
        """
        from mmcv.ops import MaskedConv2d
        prefix = 'tests/data/for_masked_conv2d/masked_conv2d_for_'
        np_input = np.load(prefix + 'input.npy')
        np_mask = np.load(prefix + 'mask.npy')
        np_weight = np.load(prefix + 'weight.npy')
        np_bias = np.load(prefix + 'bias.npy')
        np_output = np.load(prefix + 'output.npy')
        # `input_t` rather than `input` to avoid shadowing the builtin.
        input_t = torch.tensor(np_input, dtype=torch.float, device=device)
        mask = torch.tensor(np_mask, dtype=torch.float, device=device)
        weight = torch.tensor(np_weight, dtype=torch.float, device=device)
        bias = torch.tensor(np_bias, dtype=torch.float, device=device)
        # 3 in/out channels, 3x3 kernel, stride 1, padding 1 — must match
        # the configuration used to generate the reference arrays.
        conv = MaskedConv2d(3, 3, 3, 1, 1).to(device)
        conv.weight = torch.nn.Parameter(weight)
        conv.bias = torch.nn.Parameter(bias)
        output = conv(input_t, mask)
        # The original passed 1e-3 positionally, which is np.allclose's
        # rtol parameter; keep the same tolerance but name it explicitly.
        assert np.allclose(
            output.detach().cpu().numpy(), np_output, rtol=1e-3)