Unverified Commit ed315799 authored by Liurl, committed by GitHub

Fix marlin model loading compat with autogptq (#290)


Co-authored-by: LRL <lrl@lbx.dev>
parent 92e2d74f
@@ -300,9 +300,15 @@ class ModelRunner:
             self.model_config.hf_config, "quantization_config", None
         )
         if hf_quant_config is not None:
-            quant_config_class = QUANTIONCONFIG_MAPPING.get(
-                hf_quant_config["quant_method"]
-            )
+            hf_quant_method = hf_quant_config["quant_method"]
+
+            # compat: autogptq uses is_marlin_format within quant config
+            if (hf_quant_method == "gptq"
+                and "is_marlin_format" in hf_quant_config
+                and hf_quant_config["is_marlin_format"]):
+                hf_quant_method = "marlin"
+
+            quant_config_class = QUANTIONCONFIG_MAPPING.get(hf_quant_method)
             if quant_config_class is None:
                 raise ValueError(
                     f"Unsupported quantization method: {hf_quant_config['quant_method']}"
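For context, below is a minimal standalone sketch of the remapping this patch introduces. The resolve_quant_method helper and the example_config dict are illustrative only and not part of the diff; the "quant_method" and "is_marlin_format" keys are the ones actually read from the model's quantization config, and the resolved name is what QUANTIONCONFIG_MAPPING is keyed on.

def resolve_quant_method(hf_quant_config: dict) -> str:
    """Map a HuggingFace quantization_config dict to the quant method name."""
    method = hf_quant_config["quant_method"]
    # AutoGPTQ serializes marlin-packed checkpoints with quant_method == "gptq"
    # plus an is_marlin_format flag, so the marlin loader must be selected
    # explicitly instead of falling through to the plain gptq path.
    if method == "gptq" and hf_quant_config.get("is_marlin_format"):
        method = "marlin"
    return method

# Example: a quantize_config.json as written by AutoGPTQ with marlin packing.
example_config = {"quant_method": "gptq", "bits": 4, "is_marlin_format": True}
assert resolve_quant_method(example_config) == "marlin"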