Unverified commit 1d12b8bc authored by Younes Belkada, committed by GitHub

ENH: Do not pass warning message in case `quantization_config` is in config but not passed as an arg (#28988)

* Update auto.py

* Update auto.py

* Update src/transformers/quantizers/auto.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Update src/transformers/quantizers/auto.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

---------
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
parent bd4b83e1
@@ -129,10 +129,13 @@ class AutoHfQuantizer:
         """
         handles situations where both quantization_config from args and quantization_config from model config are present.
         """
+        if quantization_config_from_args is not None:
             warning_msg = (
                 "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
-                " already has a `quantization_config` attribute. The `quantization_config` from the model will be prevail."
+                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
             )
+        else:
+            warning_msg = ""
 
         if isinstance(quantization_config, dict):
             quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
@@ -144,5 +147,7 @@ class AutoHfQuantizer:
                 setattr(quantization_config, attr, val)
             warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
 
+        if warning_msg != "":
             warnings.warn(warning_msg)
 
         return quantization_config
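For context, a minimal sketch of the two call paths this change distinguishes; the checkpoint id and the `GPTQConfig` arguments are purely illustrative and not taken from the commit:

```python
from transformers import AutoModelForCausalLM, GPTQConfig

# Case 1: the checkpoint's config.json already carries a `quantization_config`
# and nothing extra is passed to `from_pretrained` -> after this change,
# no warning is emitted.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-GPTQ"  # illustrative repo id
)

# Case 2: a `quantization_config` is also passed explicitly -> the warning is
# still emitted: the config stored in the model is used, and only its loading
# attributes are overwritten by the one passed here.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-GPTQ",
    quantization_config=GPTQConfig(bits=4),
)
```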