Unverified commit e0b5dbce, authored by HAI and committed by GitHub

[FP8 KV Cache] Avoid KeyError at loading pre-quantized FP8 model with kv_scale (#1559)

parent e6852b0d
@@ -400,6 +400,9 @@ class LlamaForCausalLM(nn.Module):
 # Skip loading extra bias for GPTQ models.
 if name.endswith(".bias") and name not in params_dict:
     continue
+# Skip loading kv_scale from checkpoints; it is handled by the new design.
+if name.endswith(".kv_scale") and name not in params_dict:
+    continue
 param = params_dict[name]
 weight_loader = getattr(param, "weight_loader", default_weight_loader)
 weight_loader(param, loaded_weight)
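
The fix mirrors the existing `.bias` skip for GPTQ models: any checkpoint key that has no counterpart in `params_dict` is skipped before the dictionary lookup, so pre-quantized FP8 checkpoints carrying `.kv_scale` entries no longer raise a `KeyError`. A minimal sketch of the pattern, with an illustrative loader (vLLM's actual `load_weights` additionally dispatches to per-parameter `weight_loader` callbacks, which this sketch simplifies to a plain copy):

```python
import torch
import torch.nn as nn
from typing import Iterable, Tuple


def load_weights(model: nn.Module,
                 weights: Iterable[Tuple[str, torch.Tensor]]) -> None:
    """Illustrative weight loader showing the skip-unknown-key pattern."""
    params_dict = dict(model.named_parameters())
    for name, loaded_weight in weights:
        # Pre-quantized FP8 checkpoints may contain ".kv_scale" entries
        # with no matching parameter in the model; indexing params_dict
        # directly for such names would raise KeyError.
        if name.endswith(".kv_scale") and name not in params_dict:
            continue
        param = params_dict[name]  # safe: the name is known to exist here
        param.data.copy_(loaded_weight)
```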