Commit 0940b814 authored by Ivan Malin, committed by Facebook GitHub Bot

Move predictor type check into a separate function

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/600

This allows the logic to be reused elsewhere.

Reviewed By: wat3rBro

Differential Revision: D47722117

fbshipit-source-id: 4df1083317eb29fce45ecc4d8c0fdffa417b70d4
parent d8734049
@@ -41,6 +41,10 @@ from mobile_cv.predictor.api import ModelInfo, PredictorInfo

 logger = logging.getLogger(__name__)


+def is_predictor_quantized(predictor_type: str) -> bool:
+    return "int8" in predictor_type
+
+
 def convert_model(
     cfg: CfgNode,
     pytorch_model: nn.Module,
@@ -50,7 +54,7 @@ def convert_model(
     """Converts pytorch model to pytorch model (fuse for fp32, fake quantize for int8)"""
     return (
         convert_quantized_model(cfg, pytorch_model, data_loader)
-        if "int8" in predictor_type
+        if is_predictor_quantized(predictor_type)
         else _convert_fp_model(cfg, pytorch_model, data_loader)
     )
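To illustrate the kind of reuse the summary mentions, here is a minimal, hypothetical sketch of how the new is_predictor_quantized helper could be called from other export code. The helper body matches the diff above; select_quantized_types and the example predictor type strings are assumptions made for this sketch, not part of the commit.

from typing import List


def is_predictor_quantized(predictor_type: str) -> bool:
    # Same one-line check that the commit factors out of convert_model.
    return "int8" in predictor_type


def select_quantized_types(predictor_types: List[str]) -> List[str]:
    # Hypothetical reuse: keep only the quantized (int8) predictor types,
    # e.g. to decide which requested exports need a calibration data loader.
    return [t for t in predictor_types if is_predictor_quantized(t)]


if __name__ == "__main__":
    requested = ["torchscript", "torchscript_int8", "torchscript_mobile"]
    print(select_quantized_types(requested))  # ['torchscript_int8']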