eval_metric.py 1.85 KB
Newer Older
limm's avatar
limm committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# Copyright (c) OpenMMLab. All rights reserved.
import argparse

import mmengine
import rich
from mmengine import DictAction
from mmengine.evaluator import Evaluator

from mmpretrain.registry import METRICS

HELP_URL = (
    'https://mmpretrain.readthedocs.io/en/latest/useful_tools/'
    'log_result_analysis.html#how-to-conduct-offline-metric-evaluation')

prog_description = f"""\
Evaluate metric of the results saved in pkl format.

The detailed usage can be found in {HELP_URL}
"""


def parse_args():
    """Build the command-line parser and return the parsed arguments.

    Returns:
        argparse.Namespace: Parsed CLI options with ``pkl_results`` (path to
        the pickled predictions) and ``metric_options`` (one list of
        ``key=value`` strings per ``--metric`` occurrence, or ``None`` if the
        flag was never given).
    """
    arg_parser = argparse.ArgumentParser(description=prog_description)
    arg_parser.add_argument('pkl_results', help='Results in pickle format')
    # `action='append'` + `nargs='+'` collects each `--metric` occurrence as
    # its own list of key=value tokens, so multiple metrics can be configured.
    metric_help = (
        'The metric config, the key-value pair in xxx=yyy format will be '
        'parsed as the metric config items. You can specify multiple metrics '
        'by use multiple `--metric`. For list type value, you can use '
        '"key=[a,b]" or "key=a,b", and it also allows nested list/tuple '
        'values, e.g. "key=[(a,b),(c,d)]".')
    arg_parser.add_argument(
        '--metric',
        nargs='+',
        action='append',
        dest='metric_options',
        help=metric_help)
    return arg_parser.parse_args()


def main():
    """Run offline metric evaluation on results saved in pickle format.

    Loads the pickled predictions, builds one metric per ``--metric`` CLI
    option, evaluates them with :class:`mmengine.evaluator.Evaluator`, and
    prints the resulting metrics.

    Raises:
        ValueError: If no ``--metric`` option was supplied on the command
            line.
    """
    args = parse_args()

    if args.metric_options is None:
        # Typo fix: 'speicfy' -> 'specify' in the user-facing error message.
        raise ValueError('Please specify at least one `--metric`. '
                         f'The detailed usage can be found in {HELP_URL}')

    # Build one metric instance per `--metric` occurrence; each key=value
    # token is parsed with DictAction so list/tuple syntax ("key=[a,b]",
    # "key=[(a,b),(c,d)]") is supported.
    test_metrics = []
    for metric_option in args.metric_options:
        metric_cfg = {}
        for kv in metric_option:
            k, v = kv.split('=', maxsplit=1)
            metric_cfg[k] = DictAction._parse_iterable(v)
        test_metrics.append(METRICS.build(metric_cfg))

    # Predictions are expected to be a pickled list of per-sample results
    # (e.g. the dump produced by the test script) — TODO confirm with caller.
    predictions = mmengine.load(args.pkl_results)

    evaluator = Evaluator(test_metrics)
    # Second argument (data_batch) is None: offline evaluation works purely
    # from the saved prediction results.
    eval_results = evaluator.offline_evaluate(predictions, None)
    rich.print(eval_results)


if __name__ == '__main__':
    main()