Unverified Commit 6b39470b authored by Marc Sun's avatar Marc Sun Committed by GitHub
Browse files

Raise error when quantizing a quantized model (#27500)

add error msg
parent fd65aa98
......@@ -2781,7 +2781,12 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
quantization_method_from_config = config.quantization_config.get(
"quant_method", QuantizationMethod.BITS_AND_BYTES
)
if quantization_method_from_config is not None and quantization_method_from_args is not None:
if quantization_method_from_config != quantization_method_from_args:
raise ValueError(
f"The model is already quantized with {quantization_method_from_config}. "
f"You can't quantize it again with {quantization_method_from_args}"
)
if quantization_method_from_config == QuantizationMethod.GPTQ and quantization_method_from_args is not None:
loading_attr_dict = quantization_config.get_loading_attributes()
for attr, val in loading_attr_dict.items():
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment