Unverified commit 77cf771e, authored by Ke Bao, committed by GitHub

Fix EAGLE3 for llama3.3 70b (#4716)

parent 8154de5a
@@ -610,6 +610,12 @@ class LlamaForCausalLM(nn.Module):
         return self.model.embed_tokens.weight
 
     def set_embed(self, embed):
+        # NOTE: If draft hidden size != target hidden size, the embed weight cannot be shared for EAGLE3
+        if (
+            hasattr(self.config, "target_hidden_size")
+            and self.config.target_hidden_size != self.config.hidden_size
+        ):
+            return
         del self.model.embed_tokens.weight
         self.model.embed_tokens.weight = embed
         torch.cuda.empty_cache()
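The guard added above matters when the EAGLE3 draft model declares a target_hidden_size that differs from its own hidden_size: the target's embedding matrix then has the wrong width for the draft, so it cannot be reused. Below is a minimal sketch of that check using a hypothetical config object with illustrative sizes (the 4096/8192 values are assumptions, not taken from this change):

from types import SimpleNamespace

# Hypothetical draft-model config: its own width differs from the target's.
draft_config = SimpleNamespace(hidden_size=4096, target_hidden_size=8192)

def can_share_embed(config) -> bool:
    # Mirrors the new guard in set_embed(): sharing the target's embed_tokens
    # weight is only safe when no separate target_hidden_size is configured,
    # or when it equals the draft's own hidden_size.
    return not (
        hasattr(config, "target_hidden_size")
        and config.target_hidden_size != config.hidden_size
    )

print(can_share_embed(draft_config))  # False -> set_embed() returns without sharing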
@@ -105,7 +105,10 @@ class LlamaModel(nn.Module):
             prefix=add_prefix("embed_tokens", prefix),
         )
         self.midlayer = LlamaDecoderLayer(config, 0, quant_config, prefix)
 
-        self.fc = torch.nn.Linear(config.hidden_size * 3, config.hidden_size)
+        if hasattr(config, "target_hidden_size"):
+            self.fc = torch.nn.Linear(config.target_hidden_size * 3, config.hidden_size)
+        else:
+            self.fc = torch.nn.Linear(config.hidden_size * 3, config.hidden_size)
 
         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
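The fc projection consumes a concatenation of three hidden states captured from the target model, so its input width has to track the target's hidden size rather than the draft's; sizing it from config.hidden_size breaks when the two differ, as with Llama 3.3 70B. A rough shape sketch with illustrative dimensions (8192 is the Llama 3.3 70B hidden size; the 4096 draft width is an assumption):

import torch

target_hidden_size = 8192  # Llama 3.3 70B hidden size
draft_hidden_size = 4096   # hypothetical EAGLE3 draft width (assumption)

# With the fix, the projection input is sized from the target's width.
fc = torch.nn.Linear(target_hidden_size * 3, draft_hidden_size)

# Three concatenated target-model hidden states: [batch, seq, 3 * target_hidden_size]
aux_hidden_states = torch.randn(1, 16, target_hidden_size * 3)
print(fc(aux_hidden_states).shape)  # torch.Size([1, 16, 4096]) -> matches the draft width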