"docs/source/vscode:/vscode.git/clone" did not exist on "522a9ece4baeb5abfec8953ef76469a530e987d5"
Unverified commit 16870d11, authored by Yih-Dar, committed by GitHub
Browse files

Fix wrong checkpoint paths in doc examples (#14685)


Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 01b8cd59
......@@ -2310,8 +2310,8 @@ class BigBirdForPreTraining(BigBirdPreTrainedModel):
>>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
>>> import torch
>>> tokenizer = BigBirdTokenizer.from_pretrained('bigbird-roberta-base')
>>> model = BigBirdForPreTraining.from_pretrained('bigbird-roberta-base')
>>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
>>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
......@@ -2534,7 +2534,7 @@ class BigBirdForCausalLM(BigBirdPreTrainedModel):
>>> import torch
>>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
>>> config = BigBirdConfig.from_pretrained("google/bigbird-base")
>>> config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
>>> config.is_decoder = True
>>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)
......
......@@ -1623,8 +1623,8 @@ BIGBIRD_PEGASUS_GENERATION_EXAMPLE = r"""
>>> from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained('bigbird-pegasus-large-arxiv')
>>> tokenizer = PegasusTokenizer.from_pretrained('bigbird-pegasus-large-arxiv')
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained('google/bigbird-pegasus-large-arxiv')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors='pt', truncation=True)
......
......@@ -1087,10 +1087,10 @@ class RemBertForCausalLM(RemBertPreTrainedModel):
>>> from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig
>>> import torch
>>> tokenizer = RemBertTokenizer.from_pretrained('rembert')
>>> config = RemBertConfig.from_pretrained("rembert")
>>> tokenizer = RemBertTokenizer.from_pretrained('google/rembert')
>>> config = RemBertConfig.from_pretrained("google/rembert")
>>> config.is_decoder = True
>>> model = RemBertForCausalLM.from_pretrained('rembert', config=config)
>>> model = RemBertForCausalLM.from_pretrained('google/rembert', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
......
......@@ -490,8 +490,8 @@ class SegformerModel(SegformerPreTrainedModel):
>>> from PIL import Image
>>> import requests
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0")
>>> model = SegformerModel("nvidia/segformer-b0")
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerModel("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
......@@ -726,7 +726,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
>>> import requests
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment