evaluator.py 3.82 KB
Newer Older
facebook-github-bot's avatar
facebook-github-bot committed
1
2
3
4
5
6
7
8
9
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

"""
Binary to evaluate predictor-based model (consist of models in deployable format such
torchscript, caffe2, etc.) using Detectron2Go system (dataloading, evaluation, etc).
"""

import logging
10
import sys
11
from dataclasses import dataclass
12
from typing import Optional, Type, Union
facebook-github-bot's avatar
facebook-github-bot committed
13

Yanghan Wang's avatar
Yanghan Wang committed
14
import torch
15
from d2go.config import CfgNode
facebook-github-bot's avatar
facebook-github-bot committed
16
from d2go.distributed import launch
17
from d2go.evaluation.api import AccuracyDict, MetricsDict
18
from d2go.quantization.qconfig import smart_decode_backend
19
from d2go.runner import BaseRunner
facebook-github-bot's avatar
facebook-github-bot committed
20
21
22
23
24
25
from d2go.setup import (
    basic_argument_parser,
    caffe2_global_init,
    post_mortem_if_fail_for_main,
    prepare_for_launch,
    setup_after_launch,
Tsahi Glik's avatar
Tsahi Glik committed
26
    setup_before_launch,
facebook-github-bot's avatar
facebook-github-bot committed
27
28
29
30
31
32
33
)
from d2go.utils.misc import print_metrics_table
from mobile_cv.predictor.api import create_predictor

logger = logging.getLogger("d2go.tools.caffe2_evaluator")


34
35
@dataclass
class EvaluatorOutput:
    """Result bundle returned by `main` after evaluating a predictor.

    Note: `main` currently populates both fields from the same
    `runner.do_test` result dict.
    """

    # Accuracy values (see d2go.evaluation.api.AccuracyDict).
    accuracy: AccuracyDict[float]
    # Full metrics dict as returned by the runner's do_test.
    metrics: MetricsDict[float]


facebook-github-bot's avatar
facebook-github-bot committed
40
def main(
    cfg: CfgNode,
    output_dir: str,
    runner_class: Union[str, Type[BaseRunner]],
    # binary specific optional arguments
    predictor_path: str,
    num_threads: Optional[int] = None,
    caffe2_engine: Optional[str] = None,
    caffe2_logging_print_net_summary: int = 0,
) -> EvaluatorOutput:
    """Evaluate an exported (predictor-format) model using the D2Go system.

    Args:
        cfg: D2Go config node for the run.
        output_dir: Directory used by setup/logging for this run.
        runner_class: Runner class (or its importable name) providing `do_test`.
        predictor_path: Path (a directory) to the exported model to evaluate.
        num_threads: Number of omp/mkl threads (per process) passed to Caffe2's
            GlobalInit; None leaves the default.
        caffe2_engine: Engine name for Caffe2 ops. NOTE(review): accepted from
            the CLI but currently unused in this function.
        caffe2_logging_print_net_summary: Forwarded to Caffe2 GlobalInit's
            --caffe2_logging_print_net_summary.

    Returns:
        EvaluatorOutput holding the metrics dict from `runner.do_test` (both
        fields are populated from the same dict).
    """
    # FIXME: Ideally the quantization backend should be encoded in the torchscript model
    # or the predictor, and be used automatically during the inference, without user
    # manually setting the global variable.
    torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
    # Use the module logger (lazy %-args) instead of bare print.
    logger.info("run with quantized engine: %s", torch.backends.quantized.engine)

    runner = setup_after_launch(cfg, output_dir, runner_class)

    caffe2_global_init(caffe2_logging_print_net_summary, num_threads)

    predictor = create_predictor(predictor_path)
    metrics = runner.do_test(cfg, predictor)
    print_metrics_table(metrics)

    return EvaluatorOutput(
        accuracy=metrics,
        metrics=metrics,
    )
facebook-github-bot's avatar
facebook-github-bot committed
66
67
68


def run_with_cmdline_args(args):
    """Prepare the run from parsed CLI args and launch `main`, possibly
    distributed, via d2go's launcher."""
    cfg, output_dir, runner_name = prepare_for_launch(args)
    shared_context = setup_before_launch(cfg, output_dir, runner_name)

    # Post-mortem debugging is on by default; honor the opt-out flag.
    if args.disable_post_mortem:
        main_func = main
    else:
        main_func = post_mortem_if_fail_for_main(main)

    # Binary-specific options are forwarded to `main` as keyword arguments.
    binary_kwargs = {
        "predictor_path": args.predictor_path,
        "num_threads": args.num_threads,
        "caffe2_engine": args.caffe2_engine,
        "caffe2_logging_print_net_summary": args.caffe2_logging_print_net_summary,
    }

    launch(
        main_func,
        args.num_processes,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        backend="GLOO",
        always_spawn=False,
        shared_context=shared_context,
        args=(cfg, output_dir, runner_name),
        kwargs=binary_kwargs,
    )


Tsahi Glik's avatar
Tsahi Glik committed
91
def cli(args=None):
    """Build the evaluator's argument parser and run with the given args.

    Args:
        args: Optional list of CLI tokens; when None, ``sys.argv[1:]`` is used.
    """
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ===========================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
    # NOTE(review): underscore flag style is inconsistent with the dashed flags
    # above, but renaming it would break existing invocations — left as-is.
    parser.add_argument(
        "--caffe2_logging_print_net_summary",
        type=int,
        default=0,
        help="Control the --caffe2_logging_print_net_summary in GlobalInit",
    )

    if args is None:
        args = sys.argv[1:]
    run_with_cmdline_args(parser.parse_args(args))
119
120
121


# Script entry point: delegate to the CLI wrapper.
if __name__ == "__main__":
    cli()