#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

"""
Binary to evaluate predictor-based model (consist of models in deployable format such
torchscript, caffe2, etc.) using Detectron2Go system (dataloading, evaluation, etc).
"""

import logging
import sys
from dataclasses import dataclass
from typing import Any, Dict, Optional, Type, Union

import torch
from d2go.config import CfgNode
from d2go.distributed import launch
from d2go.runner import BaseRunner
from d2go.setup import (
    basic_argument_parser,
    caffe2_global_init,
    post_mortem_if_fail_for_main,
    prepare_for_launch,
    setup_after_launch,
)
from d2go.utils.misc import print_metrics_table
from mobile_cv.common.misc.py import post_mortem_if_fail
from mobile_cv.predictor.api import create_predictor

logger = logging.getLogger("d2go.tools.caffe2_evaluator")


@dataclass
class EvaluatorOutput:
    """Result of one evaluator run: the metrics produced by `runner.do_test`."""

    # NOTE(review): `main` currently fills both fields with the same dict —
    # confirm whether `accuracy` is meant to be a distinct summary.
    accuracy: Dict[str, Any]
    # TODO: support arbitrary levels of dicts
    metrics: Dict[str, Dict[str, Dict[str, Dict[str, float]]]]


facebook-github-bot's avatar
facebook-github-bot committed
39
def main(
    cfg: CfgNode,
    output_dir: str,
    runner_class: Union[str, Type[BaseRunner]],
    # binary specific optional arguments
    predictor_path: str,
    num_threads: Optional[int] = None,
    caffe2_engine: Optional[str] = None,
    caffe2_logging_print_net_summary: int = 0,
) -> EvaluatorOutput:
    """Evaluate an exported (predictor-format) model using the D2Go test loop.

    Args:
        cfg: D2Go config node for this run.
        output_dir: directory where logs and artifacts are written.
        runner_class: runner instance name (or class) providing ``do_test``.
        predictor_path: path (a directory) to the exported predictor.
        num_threads: omp/mkl threads (per process) for Caffe2's GlobalInit.
        caffe2_engine: if set, engine of all Caffe2 ops.
            NOTE(review): accepted but not forwarded anywhere in this body —
            confirm whether it should be passed to ``caffe2_global_init``.
        caffe2_logging_print_net_summary: forwarded to Caffe2's GlobalInit.

    Returns:
        EvaluatorOutput holding the metrics from ``runner.do_test``.
    """
    # The quantized backend must match the one the model was exported with.
    torch.backends.quantized.engine = cfg.QUANTIZATION.BACKEND
    # Use the module logger (lazy %-args) instead of bare print.
    logger.info("run with quantized engine: %s", torch.backends.quantized.engine)

    runner = setup_after_launch(cfg, output_dir, runner_class)
    caffe2_global_init(caffe2_logging_print_net_summary, num_threads)

    predictor = create_predictor(predictor_path)
    metrics = runner.do_test(cfg, predictor)
    print_metrics_table(metrics)

    # NOTE(review): both fields intentionally carry the same payload today.
    return EvaluatorOutput(
        accuracy=metrics,
        metrics=metrics,
    )
facebook-github-bot's avatar
facebook-github-bot committed
62
63
64
65


@post_mortem_if_fail()
def run_with_cmdline_args(args):
    """Resolve the config from parsed CLI args and launch `main` (possibly distributed)."""
    cfg, output_dir, runner_name = prepare_for_launch(args)

    # Binary-specific options are forwarded to `main` as keyword arguments.
    binary_kwargs = {
        "predictor_path": args.predictor_path,
        "num_threads": args.num_threads,
        "caffe2_engine": args.caffe2_engine,
        "caffe2_logging_print_net_summary": args.caffe2_logging_print_net_summary,
    }

    launch(
        post_mortem_if_fail_for_main(main),
        args.num_processes,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        backend="GLOO",
        always_spawn=False,
        args=(cfg, output_dir, runner_name),
        kwargs=binary_kwargs,
    )


Tsahi Glik's avatar
Tsahi Glik committed
85
def cli(args=None):
    """Command-line entry point: build the argument parser and run the evaluator.

    Args:
        args: optional list of argument strings; defaults to ``sys.argv[1:]``.
    """
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ===========================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
    parser.add_argument(
        # Keep the legacy underscore spelling first (for backward compatibility)
        # and add a dash-separated alias consistent with the other flags; the
        # argparse dest is identical for both spellings.
        "--caffe2_logging_print_net_summary",
        "--caffe2-logging-print-net-summary",
        type=int,
        default=0,
        help="Control the --caffe2_logging_print_net_summary in GlobalInit",
    )
    args = sys.argv[1:] if args is None else args
    run_with_cmdline_args(parser.parse_args(args))
113
114
115


# Script entry point: delegate to the CLI wrapper.
if __name__ == "__main__":
    cli()