"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "e8f44af5bf44a79f102678f5d7bb737cd6da3b52"
Unverified Commit 2ce56d35 authored by Leonardo Emili, committed by GitHub

Disable Mixtral `output_router_logits` during inference (#29249)

* Set output_router_logits=False in prepare_inputs_for_generation for mixtral

* Add output_router_logits=False to prepare_inputs_for_generation for mixtral

* Fix style
parent 8a8a0a4a
@@ -1415,7 +1415,13 @@ class MixtralForCausalLM(MixtralPreTrainedModel):
     )
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        output_router_logits=False,
+        **kwargs,
     ):
         # Omit tokens covered by past_key_values
         if past_key_values is not None:
@@ -1467,6 +1473,7 @@ class MixtralForCausalLM(MixtralPreTrainedModel):
                 "past_key_values": past_key_values,
                 "use_cache": kwargs.get("use_cache"),
                 "attention_mask": attention_mask,
+                "output_router_logits": output_router_logits,
             }
         )
         return model_inputs
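For context, a minimal usage sketch of what this change affects (checkpoint name, prompt, and config override are illustrative, not part of the commit): with `output_router_logits` forced to `False` in the inputs prepared for generation, `generate()` no longer collects the per-layer router logits that are only needed for the load-balancing auxiliary loss during training.

# Sketch only: illustrates the inference path touched by this commit.
# The checkpoint and prompt below are examples, not taken from the PR.
from transformers import AutoTokenizer, MixtralForCausalLM

model_id = "mistralai/Mixtral-8x7B-v0.1"  # example Mixtral checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Even if the model was configured with output_router_logits=True (e.g. for
# training with the aux loss), prepare_inputs_for_generation now passes
# output_router_logits=False, so decoding skips the router-logit outputs.
model = MixtralForCausalLM.from_pretrained(model_id, output_router_logits=True)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))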