Unverified Commit 52b70bc1 authored by chenbohua3, committed by GitHub

restore weight by default for qat quantizer (#3992)

parent 5c9797a2
@@ -563,14 +563,13 @@ class QAT_Quantizer(Quantizer):
                 calibration_config[name]['tracked_max_input'] = float(module.tracked_max_input)
             # Recover weight/bias for batch normalization folding
+            actual_weight = getattr(module, 'old_weight', None)
+            if actual_weight is None:
+                logger.warning("Can not recover weight for layer %s. "
+                               "This may lead to a wrong accuracy performance on the backend.", name)
+            delattr(module, 'weight')
+            module.register_parameter('weight', actual_weight)
             if hasattr(module, BN_FOLD_TAG):
-                actual_weight = getattr(module, 'old_weight', None)
-                if actual_weight is None:
-                    logger.warning("Can not recover weight for layer %s. "
-                                   "This may lead to a wrong accuracy performance on the backend.", name)
-                delattr(module, 'weight')
-                module.register_parameter('weight', actual_weight)
                 actual_bias = getattr(module, 'old_bias', None)
                 delattr(module, 'bias')
                 if actual_bias is not None:
...
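Context for the change: as the diff's getattr(module, 'old_weight', None) implies, the QAT quantizer stashes the original float parameter under old_weight while weight holds a simulated-quantization copy during training, and export must hand the original parameter back to the module. Before this commit the restore only ran for layers tagged with BN_FOLD_TAG; after it, every wrapped layer gets its weight restored. Below is a minimal, self-contained sketch of that wrap/restore pattern, not NNI's actual implementation: wrap_weight and restore_weight are hypothetical helpers that only illustrate why delattr followed by register_parameter is needed to re-register the Parameter under its original name.

# Hedged sketch of the wrap/restore pattern the diff touches (hypothetical helpers,
# not NNI's code): the float weight is parked under `old_weight` during QAT and
# re-registered as `weight` on export, mirroring the recovery logic in the diff.
import logging

import torch
import torch.nn as nn

logger = logging.getLogger(__name__)


def wrap_weight(module: nn.Module) -> None:
    """Move the float parameter to `old_weight` and leave a plain tensor in `weight`."""
    weight = module.weight
    delattr(module, 'weight')                         # drop the registered Parameter
    module.register_parameter('old_weight', weight)   # keep the float copy around
    # A real quantizer would overwrite this with a fake-quantized tensor each step.
    module.weight = weight.data.clone()


def restore_weight(module: nn.Module, name: str = '') -> None:
    """Put the original float parameter back under `weight` (what the commit makes unconditional)."""
    actual_weight = getattr(module, 'old_weight', None)
    if actual_weight is None:
        logger.warning("Can not recover weight for layer %s.", name)
    delattr(module, 'weight')                              # remove the plain attribute first,
    module.register_parameter('weight', actual_weight)    # otherwise register_parameter raises


if __name__ == '__main__':
    conv = nn.Conv2d(3, 8, 3)
    original = conv.weight.detach().clone()
    wrap_weight(conv)
    restore_weight(conv, 'conv')
    assert torch.equal(conv.weight, original)  # exported module keeps the float weights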