"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "84d346b68707f3c43903b122baae76ae022ef420"
Unverified commit 8395f14d authored by Yih-Dar, committed by GitHub
Browse files

Fix doc examples: KeyError (#14699)


Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent bab15564
@@ -512,7 +512,6 @@ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
     >>> UTTERANCE = "My friends are cool but they eat too many carbs."
     >>> print("Human: ", UTTERANCE)
     >>> inputs = tokenizer([UTTERANCE], return_tensors='pt')
-    >>> inputs.pop("token_type_ids")
     >>> reply_ids = model.generate(**inputs)
     >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
     what kind of carbs do they eat? i don't know much about carbs.
@@ -517,12 +517,11 @@ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
     >>> from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration
     >>> mname = 'facebook/blenderbot_small-90M'
     >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
-    >>> tokenizer = TFBlenderbotSmallTokenizer.from_pretrained(mname)
+    >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
     >>> UTTERANCE = "My friends are cool but they eat too many carbs."
    >>> print("Human: ", UTTERANCE)
     >>> inputs = tokenizer([UTTERANCE], return_tensors='tf')
-    >>> inputs.pop("token_type_ids")
     >>> reply_ids = model.generate(**inputs)
     >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment