"vscode:/vscode.git/clone" did not exist on "d720fdb6857f4b71d922ca1e8efbe5271b5fb7c2"
Commit 272290cd authored by Yanghan Wang, committed by Facebook GitHub Bot
Browse files

use the decoded backend in evaluator

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/367

EZ

Reviewed By: xiecong

Differential Revision: D39407416

fbshipit-source-id: d0e6fa09ff926780e98c210bfce955e6b8eec7f6
parent 38216feb
@@ -15,6 +15,7 @@ import torch
 from d2go.config import CfgNode
 from d2go.distributed import launch
 from d2go.evaluation.api import AccuracyDict, MetricsDict
+from d2go.quantization.qconfig import smart_decode_backend
 from d2go.runner import BaseRunner
 from d2go.setup import (
     basic_argument_parser,
@@ -45,7 +46,10 @@ def main(
     caffe2_engine: Optional[int] = None,
     caffe2_logging_print_net_summary: int = 0,
 ) -> EvaluatorOutput:
-    torch.backends.quantized.engine = cfg.QUANTIZATION.BACKEND
+    # FIXME: Ideally the quantization backend should be encoded in the torchscript model
+    # or the predictor, and be used automatically during the inference, without user
+    # manually setting the global variable.
+    torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
     print("run with quantized engine: ", torch.backends.quantized.engine)
     runner = setup_after_launch(cfg, output_dir, runner_class)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment