"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "62f58046088061314545411135a43a0ee8ddf6ba"
Unverified Commit 9322c244 authored by Serge Panev, committed by GitHub

Fix typo in Llama docstrings (#24020)



* Fix typo in Llama docstrings
Signed-off-by: Serge Panev <spanev@nvidia.com>

* Update
Signed-off-by: Serge Panev <spanev@nvidia.com>

* make style
Signed-off-by: Serge Panev <spanev@nvidia.com>

---------
Signed-off-by: Serge Panev <spanev@nvidia.com>
parent a73883ae
@@ -669,13 +669,13 @@ class LlamaForCausalLM(LlamaPreTrainedModel):
         >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

-        >>> prompt = "Hey, are you consciours? Can you talk to me?"
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")

         >>> # Generate
         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-        "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
         ```"""

         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
...
@@ -706,13 +706,13 @@ class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):
         >>> model = OpenLlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

-        >>> prompt = "Hey, are you consciours? Can you talk to me?"
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")

         >>> # Generate
         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-        "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
         ```"""

         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
...
@@ -926,13 +926,13 @@ class OPTForCausalLM(OPTPreTrainedModel):
         >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
         >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

-        >>> prompt = "Hey, are you consciours? Can you talk to me?"
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")

         >>> # Generate
         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-        "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
         ```"""

         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
...
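The three hunks above fix the same `consciours` → `conscious` typo in one shared usage example. For reference, here is a self-contained version of the corrected example. It uses the public `facebook/opt-350m` checkpoint named in the OPT hunk (the Llama variants keep their `PATH_TO_CONVERTED_WEIGHTS`/`PATH_TO_CONVERTED_TOKENIZER` placeholders); the exact completion can vary across transformers versions, so the expected strings in the docstrings are illustrative rather than guaranteed:

```python
# Self-contained version of the corrected docstring example.
# Assumes the public facebook/opt-350m checkpoint from the diff above;
# the generated completion may differ slightly across library versions.
from transformers import AutoTokenizer, OPTForCausalLM

model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

prompt = "Hey, are you conscious? Can you talk to me?"
inputs = tokenizer(prompt, return_tensors="pt")

# Greedy generation, capped at 30 tokens including the prompt
generate_ids = model.generate(inputs.input_ids, max_length=30)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
```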
@@ -54,7 +54,9 @@ _CONFIG_FOR_DOC = "OPTConfig"
 _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]

 # Causal LM output
-_CAUSAL_LM_EXPECTED_OUTPUT = "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
+_CAUSAL_LM_EXPECTED_OUTPUT = (
+    "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
+)

 LARGE_NEGATIVE = -1e8
...
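The last hunk also rewraps the module-level constant into grouping parentheses, which is the "make style" step of the commit message: the corrected sentence is longer than the old one and (assuming transformers' black configuration with a 119-character line length) no longer fits on a single line. A quick sketch confirming the two forms define the same string:

```python
# The parenthesized form from the diff; Python's grouping parentheses make
# this the same single string literal as a one-line assignment would be.
_CAUSAL_LM_EXPECTED_OUTPUT = (
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
)

# One-line equivalent (exceeds the assumed 119-char limit, so black wraps it)
one_line = "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
assert _CAUSAL_LM_EXPECTED_OUTPUT == one_line
```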