Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
ad2303a4
Commit
ad2303a4
authored
Nov 10, 2020
by
Julien Chaumond
Browse files
Fix style
parent
55e8d0ce
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
4 additions
and
12 deletions
+4
-12
src/transformers/configuration_openai.py
src/transformers/configuration_openai.py
+1
-3
src/transformers/tokenization_bert_generation.py
src/transformers/tokenization_bert_generation.py
+1
-3
src/transformers/tokenization_mobilebert.py
src/transformers/tokenization_mobilebert.py
+1
-3
src/transformers/tokenization_mobilebert_fast.py
src/transformers/tokenization_mobilebert_fast.py
+1
-3
No files found.
src/transformers/configuration_openai.py
View file @
ad2303a4
...
...
@@ -21,9 +21,7 @@ from .utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
...
...
src/transformers/tokenization_bert_generation.py
View file @
ad2303a4
...
...
@@ -29,9 +29,7 @@ logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
tokenizer_url = (
    "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
)
tokenizer_url = "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
class BertGenerationTokenizer(PreTrainedTokenizer):
...
...
src/transformers/tokenization_mobilebert.py
View file @
ad2303a4
...
...
@@ -22,9 +22,7 @@ logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
...
...
src/transformers/tokenization_mobilebert_fast.py
View file @
ad2303a4
...
...
@@ -23,9 +23,7 @@ logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"},
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment