Unverified Commit aee094e4 authored by zyksir's avatar zyksir Committed by GitHub
Browse files

add support for nvidia/gpt-oss-120b-Eagle3 (#9739)

parent 55349e36
......@@ -185,9 +185,13 @@ class LlamaForCausalLMEagle3(LlamaForCausalLM):
)
# Llama 3.2 1B Instruct set tie_word_embeddings to True
# Llama 3.1 8B Instruct set tie_word_embeddings to False
self.load_lm_head_from_target = False
if self.config.tie_word_embeddings:
self.lm_head = self.model.embed_tokens
else:
if config.draft_vocab_size is None:
self.load_lm_head_from_target = True
config.draft_vocab_size = config.vocab_size
self.lm_head = ParallelLMHead(
config.draft_vocab_size,
config.hidden_size,
......
......@@ -137,7 +137,14 @@ class EAGLEWorker(TpModelWorker):
embed, head = self.target_worker.model_runner.model.get_embed_and_head()
if self.speculative_algorithm.is_eagle3():
# EAGLE3 models don't share lm_head
# most cases EAGLE3 models don't share lm_head
# but some models (e.g. nvidia/gpt-oss-120b-Eagle3) share it
if (
hasattr(self.draft_model_runner.model, "load_lm_head_from_target")
and self.draft_model_runner.model.load_lm_head_from_target
):
self.draft_model_runner.model.set_embed_and_head(embed, head)
else:
self.draft_model_runner.model.set_embed(embed)
# grab hot token ids
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment