"docs/zh_cn/get_started/build.md" did not exist on "afe0794ce87bf0227c6dfb9bb17fee69615a79b8"
test_bezier_align.py 1.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE

# Ground-truth fixtures for test_bezieralign below.
# inputs[0]: one 1-channel 4x4 feature map, shape (1, 1, 4, 4).
# inputs[1]: one RoI row of 17 numbers — the leading 0. is the batch index,
#   followed by 16 coordinates; presumably 8 (x, y) Bezier control points
#   describing the region — TODO confirm against the bezier_align op docs.
inputs = ([[[
    [1., 2., 5., 6.],
    [3., 4., 7., 8.],
    [9., 10., 13., 14.],
    [11., 12., 15., 16.],
]]], [[0., 0., 0., 1, 0., 2., 0., 3., 0., 3., 3., 2., 3., 1., 3., 0., 3.]])
# outputs[0]: expected forward result of bezier_align on the fixture above,
#   shape (1, 1, 4, 4).
# outputs[1]: expected gradient w.r.t. the input feature map for an
#   all-ones upstream gradient, same shape as inputs[0].
# Values are exact reference numbers; compared with atol=1e-3 in the test,
# do not reformat or round them.
outputs = ([[[[1., 1.75, 3.5, 5.25], [2.5, 3.25, 5., 6.75],
              [6., 6.75, 8.5, 10.25],
              [9.5, 10.25, 12., 13.75]]]], [[[[1.5625, 1.5625, 1.5625, 0.3125],
                                              [1.5625, 1.5625, 1.5625, 0.3125],
                                              [1.5625, 1.5625, 1.5625, 0.3125],
                                              [0.3125, 0.3125, 0.3125,
                                               0.0625]]]])


@pytest.mark.parametrize('device', [
    'cpu',
    pytest.param(
        'cuda',
        marks=pytest.mark.skipif(
            not IS_CUDA_AVAILABLE, reason='requires CUDA support'))
])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_bezieralign(device, dtype):
    """Check bezier_align forward output and input gradient vs fixtures.

    Runs the op on the module-level ``inputs`` fixture for every
    (device, dtype) combination, backpropagates an all-ones gradient,
    and compares both the forward result and ``x.grad`` against the
    precomputed values in ``outputs`` with an absolute tolerance of 1e-3
    (loose enough to absorb half-precision rounding).

    Args:
        device (str): 'cpu', or 'cuda' when CUDA is available.
        dtype (torch.dtype): dtype the op is exercised in.
    """
    try:
        from mmcv.ops import bezier_align
    except ModuleNotFoundError:
        # The op is a compiled extension; skip when mmcv was built without it.
        pytest.skip('test requires compilation')
    pool_h = 4
    pool_w = 4
    spatial_scale = 1.0
    sampling_ratio = 1
    np_input = np.array(inputs[0])
    np_rois = np.array(inputs[1])
    np_output = np.array(outputs[0])
    np_grad = np.array(outputs[1])

    # requires_grad on the feature map so backward() populates x.grad.
    x = torch.tensor(np_input, dtype=dtype, device=device, requires_grad=True)
    rois = torch.tensor(np_rois, dtype=dtype, device=device)

    # Trailing False presumably disables the op's "aligned" coordinate
    # offset — TODO confirm against the bezier_align signature.
    output = bezier_align(x, rois, (pool_h, pool_w), spatial_scale,
                          sampling_ratio, False)
    output.backward(torch.ones_like(output))
    # Use .detach() rather than the deprecated Tensor.data attribute;
    # cast to float32 so half-precision results compare cleanly in numpy.
    assert np.allclose(
        output.detach().type(torch.float).cpu().numpy(), np_output, atol=1e-3)
    # x.grad is already detached from the graph; no .data needed.
    assert np.allclose(
        x.grad.type(torch.float).cpu().numpy(), np_grad, atol=1e-3)