test_metrics.py
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial

import pytest
import torch

from mmcls.core import average_performance, mAP
from mmcls.models.losses.accuracy import Accuracy, accuracy_numpy


def test_mAP():
    target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1],
                           [0, 1, 0, -1]])
    pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1],
                         [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2]])
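    # Entries labelled -1 mark difficult samples: they are skipped when
    # accumulating precision, and a class with no positive labels contributes
    # an AP of 0.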

    # target and pred should both be np.ndarray or torch.Tensor
    with pytest.raises(TypeError):
        target_list = target.tolist()
        _ = mAP(pred, target_list)

    # target and pred should have the same shape
    with pytest.raises(AssertionError):
        target_shorter = target[:-1]
        _ = mAP(pred, target_shorter)

    assert mAP(pred, target) == pytest.approx(68.75, rel=1e-2)

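    # Without difficult (-1) entries, every sample contributes to each class AP.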
    target_no_difficult = torch.Tensor([[1, 1, 0, 0], [0, 1, 0, 0],
                                        [0, 0, 1, 0], [1, 0, 0, 0]])
    assert mAP(pred, target_no_difficult) == pytest.approx(70.83, rel=1e-2)


def test_average_performance():
    target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1],
                           [0, 1, 0, -1], [0, 1, 0, -1]])
    pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1],
                         [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2],
                         [0.8, 0.1, 0.1, 0.2]])
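    # average_performance returns (CP, CR, CF1, OP, OR, OF1): class-wise and
    # overall precision / recall / F1-score, each as a percentage.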

    # target and pred should both be np.ndarray or torch.Tensor
    with pytest.raises(TypeError):
        target_list = target.tolist()
        _ = average_performance(pred, target_list)

    # target and pred should have the same shape
    with pytest.raises(AssertionError):
        target_shorter = target[:-1]
        _ = average_performance(pred, target_shorter)

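    # With neither thr nor k given, the default behaviour matches thr=0.5.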
    assert average_performance(pred, target) == average_performance(
        pred, target, thr=0.5)
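    # When thr and k are both given, the threshold takes precedence over top-k.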
    assert average_performance(pred, target, thr=0.5, k=2) \
        == average_performance(pred, target, thr=0.5)
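    # Reference values for an explicit threshold and for top-k positive selection.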
    assert average_performance(
        pred, target, thr=0.3) == pytest.approx(
            (31.25, 43.75, 36.46, 33.33, 42.86, 37.50), rel=1e-2)
    assert average_performance(
        pred, target, k=2) == pytest.approx(
            (43.75, 50.00, 46.67, 40.00, 57.14, 47.06), rel=1e-2)


def test_accuracy():
    pred_tensor = torch.tensor([[0.1, 0.2, 0.4], [0.2, 0.5, 0.3],
                                [0.4, 0.3, 0.1], [0.8, 0.9, 0.0]])
    target_tensor = torch.tensor([2, 0, 0, 0])
    pred_array = pred_tensor.numpy()
    target_array = target_tensor.numpy()

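    # Row-wise argmax of pred is [2, 1, 0, 1] against targets [2, 0, 0, 0]:
    # 2/4 correct gives top-1 accuracy 50%, and one more target appears among
    # the top-2 candidates, giving top-2 accuracy 75%.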
    acc_top1 = 50.
    acc_top2 = 75.

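    # An integer topk returns a single scalar accuracy.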
    compute_acc = Accuracy(topk=1)
    assert compute_acc(pred_tensor, target_tensor) == acc_top1
    assert compute_acc(pred_array, target_array) == acc_top1

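    # A tuple topk returns a list of accuracies, even with a single entry.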
    compute_acc = Accuracy(topk=(1, ))
    assert compute_acc(pred_tensor, target_tensor)[0] == acc_top1
    assert compute_acc(pred_array, target_array)[0] == acc_top1

    compute_acc = Accuracy(topk=(1, 2))
    assert compute_acc(pred_tensor, target_tensor)[0] == acc_top1
    assert compute_acc(pred_tensor, target_tensor)[1] == acc_top2
    assert compute_acc(pred_array, target_array)[0] == acc_top1
    assert compute_acc(pred_array, target_array)[1] == acc_top2

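    # target should be np.ndarray or torch.Tensor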
    with pytest.raises(AssertionError):
        compute_acc(pred_tensor, 'other_type')

    # test accuracy_numpy
    compute_acc = partial(accuracy_numpy, topk=(1, 2))
    assert compute_acc(pred_array, target_array)[0] == acc_top1
    assert compute_acc(pred_array, target_array)[1] == acc_top2