"sgl-kernel/vscode:/vscode.git/clone" did not exist on "6b45a21d16a34f23ab2e6ff945987aaa076cfba9"
Commit 6c8c2fc8 authored by Yanghan Wang, committed by Facebook GitHub Bot

remove support of using prepare_for_quant in FX mode

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/338

Now that we have separated all the `prepare_for_quant` implementations for eager and FX mode, we can remove this branch.
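For reference, the split looks roughly like this on the model side. This is a hypothetical sketch: only the hook names and signatures (`prepare_for_quant(cfg)` for eager mode, `custom_prepare_fx(cfg, is_qat, example_input)` for FX graph mode) come from the `hasattr` checks in `prepare_fake_quant_model`; the class name and method bodies are illustrative.

```python
import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx


class QuantReadyModel(torch.nn.Module):
    # Hypothetical model: the two quantization hooks are now separate,
    # one per quantization mode.

    def prepare_for_quant(self, cfg):
        # Eager-mode hook: attach qconfigs / fuse modules and return the model.
        # The caller still runs torch.ao.quantization.prepare(_qat) afterwards.
        self.qconfig = get_default_qconfig("fbgemm")
        return self

    def custom_prepare_fx(self, cfg, is_qat, example_input=None):
        # FX-graph-mode hook: the model traces itself and returns the
        # observer-instrumented GraphModule (single-tensor input assumed).
        return prepare_fx(self, {"": get_default_qconfig("fbgemm")}, (example_input,))
```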

Reviewed By: jerryzh168

Differential Revision: D37865628

fbshipit-source-id: cd8f3aa7c90201f44bcfdbd65eb2edf5eded0e0c
parent a2659f5c
@@ -297,7 +297,9 @@ def prepare_fake_quant_model(cfg, model, is_qat, example_input=None):
         if hasattr(model, "prepare_for_quant"):
             model = model.prepare_for_quant(cfg)
         else:
-            logger.info("Using default implementation for prepare_for_quant")
+            logger.info(
+                "Using default implementation for prepare_for_quant (eager mode)"
+            )
             model = default_prepare_for_quant(cfg, model)
         # NOTE: eager model needs to call prepare after `prepare_for_quant`
         if is_qat:
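The eager branch above still relies on a separate `prepare()`/`prepare_qat()` call after `prepare_for_quant`, as the NOTE in the hunk says. A minimal sketch of that sequence, assuming the stock `torch.ao.quantization` eager-mode API; the real `default_prepare_for_quant` lives in d2go and is more involved.

```python
import torch
from torch.ao import quantization as tq


def eager_fake_quant_sketch(model, is_qat):
    # Step 1: prepare_for_quant (default or model-provided) attaches a qconfig.
    model.qconfig = (
        tq.get_default_qat_qconfig("fbgemm") if is_qat
        else tq.get_default_qconfig("fbgemm")
    )
    # Step 2: eager mode needs an explicit prepare()/prepare_qat() call
    # afterwards, which is what the `if is_qat:` branch above leads into.
    if is_qat:
        model = tq.prepare_qat(model.train())
    else:
        model = tq.prepare(model)
    return model
```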
@@ -315,11 +317,10 @@ def prepare_fake_quant_model(cfg, model, is_qat, example_input=None):
         if hasattr(model, "custom_prepare_fx"):
             model = model.custom_prepare_fx(cfg, is_qat, example_input)
-        # TODO: remove this branch after completely separating the eager and FX APIs
-        elif hasattr(model, "prepare_for_quant"):
-            model = model.prepare_for_quant(cfg, example_input)
         else:
-            logger.info("Using default implementation for custom_prepare_fx")
+            logger.info(
+                "Using default implementation for custom_prepare_fx (FX graph mode)"
+            )
             model = default_custom_prepare_fx(cfg, model, is_qat, example_input)
     return model
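The FX branch, by contrast, needs the example input because `prepare_fx` traces the model. A minimal sketch of what a default FX preparation can look like, assuming the stock `torch.ao.quantization.quantize_fx` API; the actual `default_custom_prepare_fx` differs, and the `prepare_fx` signature varies slightly across PyTorch releases.

```python
from torch.ao.quantization import get_default_qat_qconfig, get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx, prepare_qat_fx


def fx_fake_quant_sketch(model, is_qat, example_input):
    qconfig = get_default_qat_qconfig("fbgemm") if is_qat else get_default_qconfig("fbgemm")
    qconfig_dict = {"": qconfig}  # apply one qconfig globally
    prepare = prepare_qat_fx if is_qat else prepare_fx
    # FX mode symbolically traces the model, so an example input is required.
    return prepare(model, qconfig_dict, (example_input,))
```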