# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE


class TestMaskedConv2d:

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'mlu',
            marks=pytest.mark.skipif(
                not IS_MLU_AVAILABLE, reason='requires MLU support'))
    ])
    def test_masked_conv2d_all_close(self, device):
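        """Compare MaskedConv2d's output with precomputed reference arrays.

        The fixtures are loaded from .npy files and the result is checked
        with np.allclose at a 1e-3 relative tolerance.
        """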
        from mmcv.ops import MaskedConv2d
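        # Load the precomputed fixtures: input, mask, weight, bias and the
        # expected output.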
        np_input = np.load(
            './data/for_masked_conv2d/masked_conv2d_for_input.npy')
        np_mask = np.load(
            './data/for_masked_conv2d/masked_conv2d_for_mask.npy')
        np_weight = np.load(
            './data/for_masked_conv2d/masked_conv2d_for_weight.npy')
        np_bias = np.load(
            './data/for_masked_conv2d/masked_conv2d_for_bias.npy')
        np_output = np.load(
            './data/for_masked_conv2d/masked_conv2d_for_output.npy')
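        # Move the fixtures onto the target device as float32 tensors.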
        input = torch.tensor(np_input, dtype=torch.float, device=device)
        mask = torch.tensor(np_mask, dtype=torch.float, device=device)
        weight = torch.tensor(np_weight, dtype=torch.float, device=device)
        bias = torch.tensor(np_bias, dtype=torch.float, device=device)
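        # 3x3 masked convolution with 3 input/output channels, stride 1 and
        # padding 1, initialized with the reference weight and bias.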
        conv = MaskedConv2d(3, 3, 3, 1, 1).to(device)
        conv.weight = torch.nn.Parameter(weight)
        conv.bias = torch.nn.Parameter(bias)
        output = conv(input, mask)
        assert np.allclose(output.data.cpu().numpy(), np_output, 1e-3)