Unverified commit 466144d4, authored by yesinkim, committed by GitHub
Browse files

[Docs] fix typos in some tokenizer docs (#22256)



[Docs] fix typos
Co-authored-by: yesinkim <yesinkim@yesinkimui-MacBookAir.local>
parent a48310de
...@@ -124,7 +124,7 @@ class LongformerTokenizer(PreTrainedTokenizer): ...@@ -124,7 +124,7 @@ class LongformerTokenizer(PreTrainedTokenizer):
>>> from transformers import LongformerTokenizer >>> from transformers import LongformerTokenizer
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
>>> tokenizer("Hello world")['input_ids'] >>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2] [0, 31414, 232, 2]
>>> tokenizer(" Hello world")['input_ids'] >>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2] [0, 20920, 232, 2]
``` ```
......
...@@ -100,7 +100,7 @@ class LongformerTokenizerFast(PreTrainedTokenizerFast): ...@@ -100,7 +100,7 @@ class LongformerTokenizerFast(PreTrainedTokenizerFast):
>>> from transformers import LongformerTokenizerFast >>> from transformers import LongformerTokenizerFast
>>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096") >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
>>> tokenizer("Hello world")['input_ids'] >>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2] [0, 31414, 232, 2]
>>> tokenizer(" Hello world")['input_ids'] >>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2] [0, 20920, 232, 2]
``` ```
......
...@@ -115,7 +115,7 @@ class RobertaTokenizer(PreTrainedTokenizer): ...@@ -115,7 +115,7 @@ class RobertaTokenizer(PreTrainedTokenizer):
>>> from transformers import RobertaTokenizer >>> from transformers import RobertaTokenizer
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base") >>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> tokenizer("Hello world")['input_ids'] >>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2] [0, 31414, 232, 2]
>>> tokenizer(" Hello world")['input_ids'] >>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2] [0, 20920, 232, 2]
``` ```
......
...@@ -85,7 +85,7 @@ class RobertaTokenizerFast(PreTrainedTokenizerFast): ...@@ -85,7 +85,7 @@ class RobertaTokenizerFast(PreTrainedTokenizerFast):
>>> from transformers import RobertaTokenizerFast >>> from transformers import RobertaTokenizerFast
>>> tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") >>> tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
>>> tokenizer("Hello world")['input_ids'] >>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2] [0, 31414, 232, 2]
>>> tokenizer(" Hello world")['input_ids'] >>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2] [0, 20920, 232, 2]
``` ```
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment