Unverified Commit bc3e20dc authored by Arthur's avatar Arthur Committed by GitHub
Browse files

[`Llama`] remove prompt and fix prefix finetuning (#25565)

* nit

* update

* make sure use_default_system_prompt is saved

* update checkpointing

* consistency

* use_default_system_prompt for test
parent 30b3c46f
......@@ -683,7 +683,7 @@ class LlamaModel(LlamaPreTrainedModel):
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, None)
return module(*inputs, past_key_value, output_attentions)
return custom_forward
......@@ -692,7 +692,6 @@ class LlamaModel(LlamaPreTrainedModel):
hidden_states,
attention_mask,
position_ids,
None,
)
else:
layer_outputs = decoder_layer(
......
......@@ -113,6 +113,7 @@ class LlamaTokenizer(PreTrainedTokenizer):
add_bos_token=True,
add_eos_token=False,
clean_up_tokenization_spaces=False,
use_default_system_prompt=True,
spaces_between_special_tokens=False,
legacy=None,
**kwargs,
......@@ -131,6 +132,7 @@ class LlamaTokenizer(PreTrainedTokenizer):
add_eos_token=add_eos_token,
sp_model_kwargs=self.sp_model_kwargs,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
use_default_system_prompt=use_default_system_prompt,
spaces_between_special_tokens=spaces_between_special_tokens,
legacy=legacy,
**kwargs,
......@@ -149,8 +151,9 @@ class LlamaTokenizer(PreTrainedTokenizer):
self.vocab_file = vocab_file
self.add_bos_token = add_bos_token
self.add_eos_token = add_eos_token
self.sp_model = self.get_spm_processor()
self.use_default_system_prompt = use_default_system_prompt
self.sp_model = self.get_spm_processor()
self.unk_token_length = len(self.sp_model.encode(str(self.unk_token)))
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
......@@ -390,8 +393,12 @@ class LlamaTokenizer(PreTrainedTokenizer):
`List[int]`:
Input ids for the conversation.
"""
if self.use_default_system_prompt:
if len(conversation.past_user_inputs) > 0:
if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:
if (
not conversation.past_user_inputs[0].startswith(B_SYS)
or E_SYS not in conversation.past_user_inputs[0]
):
conversation.past_user_inputs[0] = (
B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]
)
......
......@@ -110,6 +110,7 @@ class LlamaTokenizerFast(PreTrainedTokenizerFast):
eos_token="</s>",
add_bos_token=True,
add_eos_token=False,
use_default_system_prompt=True,
**kwargs,
):
super().__init__(
......@@ -119,12 +120,13 @@ class LlamaTokenizerFast(PreTrainedTokenizerFast):
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
use_default_system_prompt=use_default_system_prompt,
**kwargs,
)
self._add_bos_token = add_bos_token
self._add_eos_token = add_eos_token
self.update_post_processor()
self.use_default_system_prompt = use_default_system_prompt
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
......@@ -212,8 +214,12 @@ class LlamaTokenizerFast(PreTrainedTokenizerFast):
`List[int]`:
Input ids for the conversation.
"""
if self.use_default_system_prompt:
if len(conversation.past_user_inputs) > 0:
if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:
if (
not conversation.past_user_inputs[0].startswith(B_SYS)
or E_SYS not in conversation.past_user_inputs[0]
):
conversation.past_user_inputs[0] = (
B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]
)
......
......@@ -220,7 +220,7 @@ class ConversationalPipelineTests(unittest.TestCase):
@require_torch
@slow
def test_integration_torch_conversation_llama2_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_default_system_prompt=True)
conversation = Conversation(
"What is so great about #1?",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment