Commit 62613829 authored by Yanghan Wang's avatar Yanghan Wang Committed by Facebook GitHub Bot
Browse files

fix quantization import

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/575

ez

Reviewed By: ajinkya-deogade

Differential Revision: D46773836

fbshipit-source-id: 8cbfbfac6a60cab26ee1975ce0b876738711c160
parent 0389f4ee
@@ -24,10 +24,10 @@ from mobile_cv.common.misc.iter_utils import recursive_iterate
 TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
 if TORCH_VERSION > (1, 10):
-    from torch.ao.quantization import convert
+    from torch.ao.quantization.quantize import convert
     from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
 else:
-    from torch.quantization import convert
+    from torch.quantization.quantize import convert
     from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
 logger = logging.getLogger(__name__)
...
@@ -14,13 +14,13 @@ from mobile_cv.arch.quantization.observer import update_stat as observer_update_
 from pytorch_lightning import LightningModule, Trainer
 from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.utilities import rank_zero_info
-from torch.ao.quantization import (  # @manual
+from torch.ao.quantization.qconfig import (
     get_default_qat_qconfig,
     get_default_qconfig,
     QConfig,
     QConfigDynamic,
-    QuantType,
 )
+from torch.ao.quantization.quant_type import QuantType
 from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
 from torch.ao.quantization.utils import get_fqn_to_example_inputs, get_quant_type
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment