evaluator.py 3.68 KB
Newer Older
facebook-github-bot's avatar
facebook-github-bot committed
1
2
3
4
5
6
7
8
9
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

"""
Binary to evaluate a predictor-based model (consisting of models in a deployable format
such as torchscript, caffe2, etc.) using the Detectron2Go system (dataloading, evaluation, etc).
"""

import logging
10
import sys
11
from dataclasses import dataclass
12
from typing import Optional, Type, Union
facebook-github-bot's avatar
facebook-github-bot committed
13

Yanghan Wang's avatar
Yanghan Wang committed
14
import torch
15
from d2go.config import CfgNode
facebook-github-bot's avatar
facebook-github-bot committed
16
from d2go.distributed import launch
17
from d2go.evaluation.api import AccuracyDict, MetricsDict
18
from d2go.quantization.qconfig import smart_decode_backend
19
from d2go.runner import BaseRunner
facebook-github-bot's avatar
facebook-github-bot committed
20
21
22
23
24
25
26
27
28
29
30
31
32
from d2go.setup import (
    basic_argument_parser,
    caffe2_global_init,
    post_mortem_if_fail_for_main,
    prepare_for_launch,
    setup_after_launch,
)
from d2go.utils.misc import print_metrics_table
from mobile_cv.predictor.api import create_predictor

logger = logging.getLogger("d2go.tools.caffe2_evaluator")


33
34
@dataclass
class EvaluatorOutput:
    """Result bundle returned by ``main`` after evaluating a predictor.

    NOTE(review): ``main`` currently fills both fields with the same dict
    returned by ``runner.do_test`` — presumably ``accuracy`` is meant to be a
    subset/summary; confirm against callers before relying on a distinction.
    """

    # Accuracy-style metrics keyed by dataset/metric name.
    accuracy: AccuracyDict[float]
    # Full metrics dict as produced by the runner's test loop.
    metrics: MetricsDict[float]


facebook-github-bot's avatar
facebook-github-bot committed
39
def main(
    cfg: CfgNode,
    output_dir: str,
    runner_class: Union[str, Type[BaseRunner]],
    # binary specific optional arguments
    predictor_path: str,
    num_threads: Optional[int] = None,
    # NOTE: the CLI parses --caffe2-engine with type=str, so this is a string,
    # not an int (annotation fixed accordingly). It is currently accepted but
    # never used in this function body.
    caffe2_engine: Optional[str] = None,
    caffe2_logging_print_net_summary: int = 0,
) -> EvaluatorOutput:
    """Evaluate an exported predictor with the D2Go runner's test loop.

    Args:
        cfg: config used to build the dataloader/evaluator via the runner.
        output_dir: directory for logs and evaluation artifacts.
        runner_class: runner (or its importable name) providing ``do_test``.
        predictor_path: path (a directory) to the exported model to evaluate.
        num_threads: omp/mkl threads per process for Caffe2's GlobalInit.
        caffe2_engine: engine override forwarded from the CLI (unused here).
        caffe2_logging_print_net_summary: forwarded to Caffe2's GlobalInit.

    Returns:
        EvaluatorOutput with the metrics dict produced by ``runner.do_test``
        (currently duplicated into both ``accuracy`` and ``metrics``).
    """
    # FIXME: Ideally the quantization backend should be encoded in the torchscript model
    # or the predictor, and be used automatically during the inference, without user
    # manually setting the global variable.
    torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
    logger.info("run with quantized engine: %s", torch.backends.quantized.engine)

    runner = setup_after_launch(cfg, output_dir, runner_class)

    caffe2_global_init(caffe2_logging_print_net_summary, num_threads)

    predictor = create_predictor(predictor_path)
    metrics = runner.do_test(cfg, predictor)
    print_metrics_table(metrics)

    return EvaluatorOutput(
        accuracy=metrics,
        metrics=metrics,
    )
facebook-github-bot's avatar
facebook-github-bot committed
65
66
67


def run_with_cmdline_args(args):
    """Translate parsed CLI args into a (possibly distributed) call to ``main``.

    Resolves the config/output-dir/runner from ``args``, optionally wraps
    ``main`` with the post-mortem-on-failure helper, and launches it across
    the requested processes/machines with the binary-specific kwargs.
    """
    cfg, output_dir, runner_name = prepare_for_launch(args)

    # Wrap the entry point with a post-mortem debugger unless disabled.
    if args.disable_post_mortem:
        entry_point = main
    else:
        entry_point = post_mortem_if_fail_for_main(main)

    # Binary-specific optional arguments forwarded to `main` as kwargs.
    binary_kwargs = {
        "predictor_path": args.predictor_path,
        "num_threads": args.num_threads,
        "caffe2_engine": args.caffe2_engine,
        "caffe2_logging_print_net_summary": args.caffe2_logging_print_net_summary,
    }

    launch(
        entry_point,
        args.num_processes,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        backend="GLOO",
        always_spawn=False,
        args=(cfg, output_dir, runner_name),
        kwargs=binary_kwargs,
    )


Tsahi Glik's avatar
Tsahi Glik committed
88
def cli(args=None):
    """Build the argument parser for this binary and run the evaluation.

    Args:
        args: list of CLI tokens to parse; defaults to ``sys.argv[1:]``.
    """
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ===========================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
    parser.add_argument(
        # Keep the legacy underscore spelling first so argparse derives the
        # same `dest` (caffe2_logging_print_net_summary); the dashed alias is
        # added for consistency with the other flags above.
        "--caffe2_logging_print_net_summary",
        "--caffe2-logging-print-net-summary",
        type=int,
        default=0,
        help="Control the --caffe2_logging_print_net_summary in GlobalInit",
    )
    args = sys.argv[1:] if args is None else args
    run_with_cmdline_args(parser.parse_args(args))
116
117
118


if __name__ == "__main__":
    # Script entry point: parse CLI args from sys.argv and run the evaluation.
    cli()