#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

"""
Binary to evaluate predictor-based model (consist of models in deployable format such
torchscript, caffe2, etc.) using Detectron2Go system (dataloading, evaluation, etc).
"""

import logging
10
import sys
11
from typing import Callable, List, Optional, Type, Union
facebook-github-bot's avatar
facebook-github-bot committed
12

Yanghan Wang's avatar
Yanghan Wang committed
13
import torch
14
from d2go.config import CfgNode
facebook-github-bot's avatar
facebook-github-bot committed
15
from d2go.distributed import launch
16
from d2go.quantization.qconfig import smart_decode_backend
17
from d2go.runner import BaseRunner
facebook-github-bot's avatar
facebook-github-bot committed
18
19
from d2go.setup import (
    basic_argument_parser,
Francisc Bungiu's avatar
Francisc Bungiu committed
20
    build_basic_cli_args,
facebook-github-bot's avatar
facebook-github-bot committed
21
22
23
24
    caffe2_global_init,
    post_mortem_if_fail_for_main,
    prepare_for_launch,
    setup_after_launch,
Tsahi Glik's avatar
Tsahi Glik committed
25
    setup_before_launch,
26
    setup_root_logger,
facebook-github-bot's avatar
facebook-github-bot committed
27
)
Francisc Bungiu's avatar
Francisc Bungiu committed
28
from d2go.trainer.api import EvaluatorOutput
29
from d2go.utils.mast import gather_mast_errors, mast_error_handler
30

Francisc Bungiu's avatar
Francisc Bungiu committed
31
from d2go.utils.misc import print_metrics_table, save_binary_outputs
facebook-github-bot's avatar
facebook-github-bot committed
32
33
34
35
36
37
from mobile_cv.predictor.api import create_predictor

logger = logging.getLogger("d2go.tools.caffe2_evaluator")


def main(
    cfg: CfgNode,
    output_dir: str,
    runner_class: Union[str, Type[BaseRunner]],
    # binary specific optional arguments
    predictor_path: str,
    num_threads: Optional[int] = None,
    caffe2_engine: Optional[int] = None,
    caffe2_logging_print_net_summary: int = 0,
) -> EvaluatorOutput:
    """Evaluate an exported (predictor-format) model with the D2Go runner system.

    Args:
        cfg: config used for setup and evaluation (``runner.do_test``).
        output_dir: directory where setup/evaluation artifacts are written.
        runner_class: runner class (or its importable name) whose ``do_test``
            performs the evaluation.
        predictor_path: path (a directory) to the exported predictor to load.
        num_threads: omp/mkl threads (per process) passed to Caffe2's GlobalInit.
        caffe2_engine: accepted for CLI compatibility; not used in this function.
        caffe2_logging_print_net_summary: forwarded to Caffe2's GlobalInit.

    Returns:
        EvaluatorOutput carrying the metrics produced by ``runner.do_test``.
    """
    # FIXME: Ideally the quantization backend should be encoded in the torchscript model
    # or the predictor, and be used automatically during the inference, without user
    # manually setting the global variable.
    torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
    # Use the module logger instead of a bare print() so the message flows
    # through the configured logging setup like the rest of the binary.
    logger.info("run with quantized engine: %s", torch.backends.quantized.engine)

    runner = setup_after_launch(cfg, output_dir, runner_class)
    caffe2_global_init(caffe2_logging_print_net_summary, num_threads)

    predictor = create_predictor(predictor_path)
    metrics = runner.do_test(cfg, predictor)
    print_metrics_table(metrics)
    return EvaluatorOutput(
        accuracy=metrics,
        metrics=metrics,
    )
facebook-github-bot's avatar
facebook-github-bot committed
63
64


65
66
67
68
def wrapped_main(*args, **kwargs) -> EvaluatorOutput:
    """Run :func:`main` under the MAST error handler and return its result.

    Note: the previous annotation (``Callable``) was incorrect — this function
    *invokes* the wrapped ``main`` and returns its ``EvaluatorOutput``.
    """
    return mast_error_handler(main)(*args, **kwargs)


facebook-github-bot's avatar
facebook-github-bot committed
69
def run_with_cmdline_args(args):
    """Set up from parsed CLI args, launch distributed evaluation, save outputs."""
    cfg, output_dir, runner_name = prepare_for_launch(args)
    shared_context = setup_before_launch(cfg, output_dir, runner_name)

    # Optionally wrap the entry point with a post-mortem debugger hook.
    if args.disable_post_mortem:
        entry_point = wrapped_main
    else:
        entry_point = post_mortem_if_fail_for_main(wrapped_main)

    outputs = launch(
        entry_point,
        args.num_processes,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        backend="GLOO",
        always_spawn=False,
        shared_context=shared_context,
        args=(cfg, output_dir, runner_name),
        kwargs={
            "predictor_path": args.predictor_path,
            "num_threads": args.num_threads,
            "caffe2_engine": args.caffe2_engine,
            "caffe2_logging_print_net_summary": args.caffe2_logging_print_net_summary,
        },
    )

    # Only save results from global rank 0 for consistency.
    if args.save_return_file is not None and args.machine_rank == 0:
        save_binary_outputs(args.save_return_file, outputs[0])


def build_cli_args(
    eval_only: bool = False,
    resume: bool = False,
    **kwargs,
) -> List[str]:
    """Returns parameters in the form of CLI arguments for evaluator binary.

    For the list of non-train_net-specific parameters, see build_basic_cli_args."""
    cli_args = build_basic_cli_args(**kwargs)
    # Append each boolean flag only when it is enabled.
    for enabled, flag in ((eval_only, "--eval-only"), (resume, "--resume")):
        if enabled:
            cli_args.append(flag)
    return cli_args
facebook-github-bot's avatar
facebook-github-bot committed
113
114


Tsahi Glik's avatar
Tsahi Glik committed
115
def cli(args=None):
    """Build the argument parser and run evaluation from command-line args.

    Args:
        args: optional list of argument strings; defaults to ``sys.argv[1:]``.
    """
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ===========================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
    parser.add_argument(
        "--caffe2_logging_print_net_summary",
        type=int,
        default=0,
        help="Control the --caffe2_logging_print_net_summary in GlobalInit",
    )
    # Fall back to the process argv when no explicit argument list is given.
    if args is None:
        argv = sys.argv[1:]
    else:
        argv = args
    run_with_cmdline_args(parser.parse_args(argv))
143
144
145


if __name__ == "__main__":
    # Configure the root logger before running so all records are captured.
    setup_root_logger()
    # Run the CLI and surface any MAST-collected errors from the run.
    gather_mast_errors(cli())