Unverified commit 2d92db84, authored by Arthur, committed by GitHub

`Llama` family, fix `use_cache=False` generation (#30380)

* nit to make sure cache positions are not sliced

* fix other models

* nit

* style
Parent: f16caf44
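All four hunks below make the same pair of changes: `use_cache` is promoted from `**kwargs` to an explicit keyword argument defaulting to `True`, and `cache_position` is only sliced down to the newest input positions when the cache is actually in use. A minimal standalone sketch of the resulting logic (illustrative function, not the transformers API):

```python
import torch

# Toy distillation of the change repeated in each hunk below; the function
# name and signature are illustrative, not part of transformers.
def select_cache_position(cache_position, input_length, past_length, use_cache=True):
    if cache_position is None:
        # No positions supplied: cover every token in the current input.
        return torch.arange(past_length, past_length + input_length)
    if use_cache:
        # Cached decoding feeds only the newest token(s) to the model,
        # so keep just their positions.
        return cache_position[-input_length:]
    # use_cache=False: generation re-feeds the growing sequence each step,
    # so the positions must stay unsliced (the bug this commit fixes).
    return cache_position
```

Binding `use_cache` in the signature also fixes the forwarded flag: the old `kwargs.get("use_cache")` silently produced `None` whenever the caller did not pass it, whereas the new parameter gives a reliable `True` default.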
src/transformers/models/cohere/modeling_cohere.py
@@ -1175,7 +1175,14 @@ class CohereForCausalLM(CoherePreTrainedModel):
     )
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1239,7 +1246,7 @@ class CohereForCausalLM(CoherePreTrainedModel):
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1250,7 +1257,7 @@ class CohereForCausalLM(CoherePreTrainedModel):
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
src/transformers/models/gemma/modeling_gemma.py
@@ -1157,7 +1157,14 @@ class GemmaForCausalLM(GemmaPreTrainedModel):
     )
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1221,7 +1228,7 @@ class GemmaForCausalLM(GemmaPreTrainedModel):
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1232,7 +1239,7 @@ class GemmaForCausalLM(GemmaPreTrainedModel):
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
src/transformers/models/llama/modeling_llama.py
@@ -1253,7 +1253,14 @@ class LlamaForCausalLM(LlamaPreTrainedModel):
     )
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1317,7 +1324,7 @@ class LlamaForCausalLM(LlamaPreTrainedModel):
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1328,7 +1335,7 @@ class LlamaForCausalLM(LlamaPreTrainedModel):
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
src/transformers/models/olmo/modeling_olmo.py
@@ -1234,7 +1234,14 @@ class OlmoForCausalLM(OlmoPreTrainedModel):
     )
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1298,7 +1305,7 @@ class OlmoForCausalLM(OlmoPreTrainedModel):
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1309,7 +1316,7 @@ class OlmoForCausalLM(OlmoPreTrainedModel):
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
            }
        )
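The scenario these changes fix, as a hedged usage sketch (the checkpoint name is an example only; any of the four model families above applies):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "meta-llama/Llama-2-7b-hf"  # example checkpoint, not prescribed by the PR
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
# Before this commit, disabling the KV cache broke generation in
# prepare_inputs_for_generation (cache positions were sliced even with the
# cache off); with it, no-cache generation runs (slower, but correct).
output = model.generate(**inputs, max_new_tokens=10, use_cache=False)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```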