Commit bb2e088b (unverified)
Authored May 02, 2022 by Lysandre Debut, committed by GitHub on May 02, 2022
Allow all imports from transformers (#17050)
Parent: 1ac69874

Showing 5 changed files with 42 additions and 6 deletions (+42, -6):
docs/source/en/model_doc/cpm.mdx (+4, -0)
src/transformers/__init__.py (+9, -4)
src/transformers/models/fnet/__init__.py (+8, -2)
src/transformers/utils/dummy_sentencepiece_objects.py (+14, -0)
src/transformers/utils/dummy_tokenizers_objects.py (+7, -0)
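The commit message is the only description, so as a rough reading of the diffs below: CpmTokenizer, CpmTokenizerFast and FNetTokenizer are moved behind the is_sentencepiece_available() / is_tokenizers_available() checks and given dummy fallbacks, so that importing these names from transformers no longer fails when the optional backends are missing. A minimal sketch of the intended behaviour in an environment without sentencepiece (the error wording in the comments is illustrative, not quoted from the library):

```python
# Sketch of the behaviour this change is after, run where the optional
# sentencepiece package is NOT installed.
# Before: CpmTokenizer was registered in _import_structure unconditionally,
# so resolving the name could raise even if it was never used.
# After: the name always resolves; it points at a dummy class that only
# complains when instantiated.
from transformers import CpmTokenizer  # no longer fails at import time

try:
    tokenizer = CpmTokenizer()  # the dummy __init__ calls requires_backends(...)
except ImportError as err:
    print(err)  # message explains that sentencepiece needs to be installed
```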
docs/source/en/model_doc/cpm.mdx

@@ -38,3 +38,7 @@ Note: We only have a tokenizer here, since the model architecture is the same as
 ## CpmTokenizer

 [[autodoc]] CpmTokenizer
+
+## CpmTokenizerFast
+
+[[autodoc]] CpmTokenizerFast
src/transformers/__init__.py

@@ -168,7 +168,7 @@ _import_structure = {
     ],
     "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
     "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"],
-    "models.cpm": ["CpmTokenizer"],
+    "models.cpm": [],
     "models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
     "models.data2vec": [
         "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
@@ -197,7 +197,7 @@ _import_structure = {
     "models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"],
     "models.encoder_decoder": ["EncoderDecoderConfig"],
     "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"],
-    "models.fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig", "FNetTokenizer"],
+    "models.fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"],
     "models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"],
     "models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"],
     "models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"],
@@ -419,7 +419,9 @@ if is_sentencepiece_available():
     _import_structure["models.bert_generation"].append("BertGenerationTokenizer")
     _import_structure["models.big_bird"].append("BigBirdTokenizer")
     _import_structure["models.camembert"].append("CamembertTokenizer")
+    _import_structure["models.cpm"].append("CpmTokenizer")
     _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
+    _import_structure["models.fnet"].append("FNetTokenizer")
     _import_structure["models.layoutxlm"].append("LayoutXLMTokenizer")
     _import_structure["models.m2m_100"].append("M2M100Tokenizer")
     _import_structure["models.marian"].append("MarianTokenizer")
@@ -457,6 +459,7 @@ if is_tokenizers_available():
     _import_structure["models.camembert"].append("CamembertTokenizerFast")
     _import_structure["models.clip"].append("CLIPTokenizerFast")
     _import_structure["models.convbert"].append("ConvBertTokenizerFast")
+    _import_structure["models.cpm"].append("CpmTokenizerFast")
     _import_structure["models.deberta"].append("DebertaTokenizerFast")
     _import_structure["models.deberta_v2"].append("DebertaV2TokenizerFast")
     _import_structure["models.distilbert"].append("DistilBertTokenizerFast")
@@ -2575,7 +2578,6 @@ if TYPE_CHECKING:
     )
     from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
     from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig
-    from .models.cpm import CpmTokenizer
     from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
     from .models.data2vec import (
         DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -2605,7 +2607,7 @@ if TYPE_CHECKING:
     from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
     from .models.encoder_decoder import EncoderDecoderConfig
     from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
-    from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig, FNetTokenizer
+    from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
     from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer
     from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer
     from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
@@ -2809,7 +2811,9 @@ if TYPE_CHECKING:
         from .models.bert_generation import BertGenerationTokenizer
         from .models.big_bird import BigBirdTokenizer
         from .models.camembert import CamembertTokenizer
+        from .models.cpm import CpmTokenizer
         from .models.deberta_v2 import DebertaV2Tokenizer
+        from .models.fnet import FNetTokenizer
         from .models.layoutxlm import LayoutXLMTokenizer
         from .models.m2m_100 import M2M100Tokenizer
         from .models.marian import MarianTokenizer
@@ -2840,6 +2844,7 @@ if TYPE_CHECKING:
         from .models.camembert import CamembertTokenizerFast
         from .models.clip import CLIPTokenizerFast
         from .models.convbert import ConvBertTokenizerFast
+        from .models.cpm import CpmTokenizerFast
         from .models.deberta import DebertaTokenizerFast
         from .models.deberta_v2 import DebertaV2TokenizerFast
         from .models.distilbert import DistilBertTokenizerFast
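For context, the hunks above only move the registrations; the fallback that keeps the import working lives elsewhere in src/transformers/__init__.py, where the names from utils/dummy_sentencepiece_objects.py are exported instead when sentencepiece is absent, which is why the two dummy files below also gain entries. A self-contained sketch of that registration pattern (fake_import_structure and sentencepiece_installed are made-up names for illustration, not the library's code):

```python
# Simplified stand-in for the registration pattern in src/transformers/__init__.py.
import importlib.util

sentencepiece_installed = importlib.util.find_spec("sentencepiece") is not None

fake_import_structure = {"models.cpm": [], "models.fnet": ["FNetConfig"]}

if sentencepiece_installed:
    # the real tokenizers are exported from their model sub-packages
    fake_import_structure["models.cpm"].append("CpmTokenizer")
    fake_import_structure["models.fnet"].append("FNetTokenizer")
else:
    # the same public names are exported from the dummy module instead,
    # so "from transformers import CpmTokenizer" keeps working
    fake_import_structure["utils.dummy_sentencepiece_objects"] = [
        "CpmTokenizer",
        "FNetTokenizer",
    ]

print(fake_import_structure)
```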
src/transformers/models/fnet/__init__.py

@@ -17,14 +17,18 @@
 # limitations under the License.

 from typing import TYPE_CHECKING

+from transformers import is_sentencepiece_available
+
 from ...utils import _LazyModule, is_tokenizers_available, is_torch_available


 _import_structure = {
     "configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"],
-    "tokenization_fnet": ["FNetTokenizer"],
 }

+if is_sentencepiece_available():
+    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
+
 if is_tokenizers_available():
     _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
@@ -46,6 +50,8 @@ if is_torch_available():
 if TYPE_CHECKING:
     from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

+    if is_sentencepiece_available():
+        from .tokenization_fnet import FNetTokenizer

     if is_tokenizers_available():
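The hunks above only show the top of src/transformers/models/fnet/__init__.py; the rest of the file (unchanged, not shown) hands _import_structure to _LazyModule, so submodules are only imported when one of their names is actually requested. A simplified, self-contained sketch of that lazy idea using a PEP 562 module __getattr__, offered as a stand-in rather than the real _LazyModule implementation:

```python
# Simplified stand-in for the lazy-import machinery a model sub-package uses;
# module-level code for a hypothetical package __init__.py.
import importlib
from typing import TYPE_CHECKING

_import_structure = {
    "configuration_fnet": ["FNetConfig"],
    # "tokenization_fnet" is only added here when sentencepiece is available
}

if TYPE_CHECKING:
    pass  # type checkers would see direct imports here instead
else:
    # map each exported name to the submodule that defines it
    _name_to_module = {
        name: module for module, names in _import_structure.items() for name in names
    }

    def __getattr__(name):  # PEP 562: called only when normal lookup fails
        if name in _name_to_module:
            module = importlib.import_module("." + _name_to_module[name], __name__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```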
src/transformers/utils/dummy_sentencepiece_objects.py

@@ -45,6 +45,13 @@ class CamembertTokenizer(metaclass=DummyObject):
         requires_backends(self, ["sentencepiece"])


+class CpmTokenizer(metaclass=DummyObject):
+    _backends = ["sentencepiece"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["sentencepiece"])
+
+
 class DebertaV2Tokenizer(metaclass=DummyObject):
     _backends = ["sentencepiece"]
@@ -52,6 +59,13 @@ class DebertaV2Tokenizer(metaclass=DummyObject):
         requires_backends(self, ["sentencepiece"])


+class FNetTokenizer(metaclass=DummyObject):
+    _backends = ["sentencepiece"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["sentencepiece"])
+
+
 class LayoutXLMTokenizer(metaclass=DummyObject):
     _backends = ["sentencepiece"]
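The two classes added above follow the existing pattern in this file: placeholders whose only job is to fail with an installation hint when someone actually tries to use them. A self-contained sketch of how a DummyObject-style metaclass and a requires_backends-style check can work; these are simplified stand-ins, not the exact transformers.utils implementations:

```python
import importlib.util


def requires_backends(obj, backends):
    """Raise ImportError naming any backend that is not installed (simplified)."""
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(
            f"{name} requires the following packages that were not found: {', '.join(missing)}"
        )


class DummyObject(type):
    """Metaclass for placeholders: public attribute access re-checks the backends."""

    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)


class CpmTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
```

With sentencepiece missing, both CpmTokenizer() and attribute access such as CpmTokenizer.from_pretrained raise an ImportError that names the missing package, instead of the whole package failing to import.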
src/transformers/utils/dummy_tokenizers_objects.py

@@ -73,6 +73,13 @@ class ConvBertTokenizerFast(metaclass=DummyObject):
         requires_backends(self, ["tokenizers"])


+class CpmTokenizerFast(metaclass=DummyObject):
+    _backends = ["tokenizers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["tokenizers"])
+
+
 class DebertaTokenizerFast(metaclass=DummyObject):
     _backends = ["tokenizers"]
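A quick smoke test for the property the commit title promises, namely that every public name can at least be imported from transformers whichever optional backends happen to be installed. This is an ad-hoc check written for illustration, not the project's own test:

```python
# Ad-hoc smoke test: every advertised name should resolve without raising.
import importlib

transformers = importlib.import_module("transformers")

failures = []
for name in dir(transformers):
    if name.startswith("_"):
        continue
    try:
        getattr(transformers, name)  # lazy resolution (or a dummy object) happens here
    except Exception as err:
        failures.append((name, err))

print(f"{len(failures)} names failed to resolve")
for name, err in failures[:10]:
    print(f"{name}: {err}")
```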