Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
16870d11
Unverified
Commit
16870d11
authored
Dec 08, 2021
by
Yih-Dar
Committed by
GitHub
Dec 08, 2021
Browse files
Fix wrong checkpoint paths in doc examples (#14685)
Co-authored-by:
ydshieh
<
ydshieh@users.noreply.github.com
>
parent
01b8cd59
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
11 additions
and
11 deletions
+11
-11
src/transformers/models/big_bird/modeling_big_bird.py
src/transformers/models/big_bird/modeling_big_bird.py
+3
-3
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
...ormers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
+2
-2
src/transformers/models/rembert/modeling_rembert.py
src/transformers/models/rembert/modeling_rembert.py
+3
-3
src/transformers/models/segformer/modeling_segformer.py
src/transformers/models/segformer/modeling_segformer.py
+3
-3
No files found.
src/transformers/models/big_bird/modeling_big_bird.py
View file @
16870d11
...
@@ -2310,8 +2310,8 @@ class BigBirdForPreTraining(BigBirdPreTrainedModel):
...
@@ -2310,8 +2310,8 @@ class BigBirdForPreTraining(BigBirdPreTrainedModel):
>>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
>>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
>>> import torch
>>> import torch
>>> tokenizer = BigBirdTokenizer.from_pretrained('bigbird-roberta-base')
>>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
>>> model = BigBirdForPreTraining.from_pretrained('bigbird-roberta-base')
>>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs = model(**inputs)
...
@@ -2534,7 +2534,7 @@ class BigBirdForCausalLM(BigBirdPreTrainedModel):
...
@@ -2534,7 +2534,7 @@ class BigBirdForCausalLM(BigBirdPreTrainedModel):
>>> import torch
>>> import torch
>>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
>>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
>>> config = BigBirdConfig.from_pretrained("google/bigbird-base")
>>> config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
>>> config.is_decoder = True
>>> config.is_decoder = True
>>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)
>>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)
...
...
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
View file @
16870d11
...
@@ -1623,8 +1623,8 @@ BIGBIRD_PEGASUS_GENERATION_EXAMPLE = r"""
...
@@ -1623,8 +1623,8 @@ BIGBIRD_PEGASUS_GENERATION_EXAMPLE = r"""
>>> from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig
>>> from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration, BigBirdPegasusConfig
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained('bigbird-pegasus-large-arxiv')
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained('google/bigbird-pegasus-large-arxiv')
>>> tokenizer = PegasusTokenizer.from_pretrained('bigbird-pegasus-large-arxiv')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors='pt', truncation=True)
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors='pt', truncation=True)
...
...
src/transformers/models/rembert/modeling_rembert.py
View file @
16870d11
...
@@ -1087,10 +1087,10 @@ class RemBertForCausalLM(RemBertPreTrainedModel):
...
@@ -1087,10 +1087,10 @@ class RemBertForCausalLM(RemBertPreTrainedModel):
>>> from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig
>>> from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig
>>> import torch
>>> import torch
>>> tokenizer = RemBertTokenizer.from_pretrained('rembert')
>>> tokenizer = RemBertTokenizer.from_pretrained('google/rembert')
>>> config = RemBertConfig.from_pretrained("rembert")
>>> config = RemBertConfig.from_pretrained("google/rembert")
>>> config.is_decoder = True
>>> config.is_decoder = True
>>> model = RemBertForCausalLM.from_pretrained('rembert', config=config)
>>> model = RemBertForCausalLM.from_pretrained('google/rembert', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs = model(**inputs)
...
...
src/transformers/models/segformer/modeling_segformer.py
View file @
16870d11
...
@@ -490,8 +490,8 @@ class SegformerModel(SegformerPreTrainedModel):
...
@@ -490,8 +490,8 @@ class SegformerModel(SegformerPreTrainedModel):
>>> from PIL import Image
>>> from PIL import Image
>>> import requests
>>> import requests
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0")
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerModel("nvidia/segformer-b0")
>>> model = SegformerModel("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image = Image.open(requests.get(url, stream=True).raw)
...
@@ -726,7 +726,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
...
@@ -726,7 +726,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
>>> import requests
>>> import requests
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image = Image.open(requests.get(url, stream=True).raw)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment