Unverified Commit 54dd3ea1 authored by HAI, committed by GitHub

[FP8 KV Cache, Mixtral] Avoid KeyError at loading pre-quantized FP8 m… (#1835)

parent d04899d7
@@ -369,6 +369,9 @@ class MixtralForCausalLM(nn.Module):
             # Skip loading extra bias for GPTQ models.
             if name.endswith(".bias") and name not in params_dict:
                 continue
+            # Skip loading kv_scale from ckpts towards new design.
+            if name.endswith(".kv_scale") and name not in params_dict:
+                continue
             if name is None:
                 continue
...
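The hunk guards the Mixtral weight loader against `.kv_scale` entries that pre-quantized FP8 checkpoints may carry but that the running model does not expose as parameters; without the guard, indexing `params_dict[name]` would raise a KeyError. Below is a minimal sketch of that skip pattern, not the vLLM loader itself: `load_weights`, `params_dict`, and `weights` here are hypothetical stand-ins for illustration.

```python
import torch
import torch.nn as nn


def load_weights(params_dict: dict[str, nn.Parameter],
                 weights: list[tuple[str, torch.Tensor]]) -> None:
    """Sketch of a checkpoint-loading loop that tolerates extra keys."""
    for name, loaded_weight in weights:
        # Skip checkpoint-only keys (e.g. GPTQ bias, FP8 kv_scale) that have
        # no matching model parameter; a direct params_dict[name] lookup
        # would otherwise raise a KeyError.
        if name.endswith((".bias", ".kv_scale")) and name not in params_dict:
            continue
        param = params_dict[name]
        param.data.copy_(loaded_weight)
```

The change mirrors the existing GPTQ bias guard one line above it: unmatched checkpoint-only keys are dropped silently rather than failing the entire load.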