transformers, commit b7175a27
Authored Sep 09, 2019 by thomwolf

    fixed imports in tests and gpt2 config test

Parent: 72863735

Showing 20 changed files with 53 additions and 40 deletions (+53, -40)
Changed files:

    pytorch_transformers/__init__.py                              +1  -1
    pytorch_transformers/modeling_tf_gpt2.py                      +1  -0
    pytorch_transformers/modeling_tf_utils.py                     +3  -2
    pytorch_transformers/modeling_xlnet.py                        +4  -2
    pytorch_transformers/tests/modeling_auto_test.py              +4  -2
    pytorch_transformers/tests/modeling_bert_test.py              +2  -2
    pytorch_transformers/tests/modeling_common_test.py            +4  -2
    pytorch_transformers/tests/modeling_distilbert_test.py        +2  -2
    pytorch_transformers/tests/modeling_gpt2_test.py              +2  -2
    pytorch_transformers/tests/modeling_openai_test.py            +2  -2
    pytorch_transformers/tests/modeling_roberta_test.py           +2  -2
    pytorch_transformers/tests/modeling_tf_auto_test.py           +4  -2
    pytorch_transformers/tests/modeling_tf_bert_test.py           +2  -2
    pytorch_transformers/tests/modeling_tf_common_test.py         +4  -2
    pytorch_transformers/tests/modeling_tf_gpt2_test.py           +6  -5
    pytorch_transformers/tests/modeling_transfo_xl_test.py        +2  -2
    pytorch_transformers/tests/modeling_xlm_test.py               +2  -2
    pytorch_transformers/tests/modeling_xlnet_test.py             +2  -2
    pytorch_transformers/tests/optimization_test.py               +2  -2
    pytorch_transformers/tests/tokenization_transfo_xl_test.py    +2  -2
pytorch_transformers/__init__.py

@@ -89,7 +89,7 @@ try:
     import tensorflow as tf
     assert int(tf.__version__[0]) >= 2
     _tf_available = True  # pylint: disable=invalid-name
-except ImportError:
+except (ImportError, AssertionError):
     _tf_available = False  # pylint: disable=invalid-name
 
 if _tf_available:
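The broadened except clause matters because the TF 2.x check is an assert: with TensorFlow 1.x installed the import succeeds but the assertion fails, and previously that AssertionError escaped the package import. A minimal sketch of the guard plus the is_tf_available() helper that the test files below import (only the helper's name comes from the diffs; its body here is an assumption):

    # Sketch, not the library's verbatim code: a missing TF and a TF 1.x install
    # both simply mark TensorFlow support as unavailable.
    try:
        import tensorflow as tf
        assert int(tf.__version__[0]) >= 2   # a TF 1.x install trips this assert
        _tf_available = True
    except (ImportError, AssertionError):
        _tf_available = False

    def is_tf_available():
        # Assumed to be a thin wrapper over the module-level flag.
        return _tf_available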
pytorch_transformers/modeling_tf_gpt2.py

@@ -699,6 +699,7 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
             head_mask = inputs.get('head_mask', None)
             assert len(inputs) <= 5, "Too many inputs."
 
+        assert len(shape_list(input_ids)) == 3, "Inputs should have 3 dimensions: batch, choices, sequence length"
         num_choices = shape_list(input_ids)[1]
         seq_length = shape_list(input_ids)[2]
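The new assertion documents the input contract of TFGPT2DoubleHeadsModel: input_ids must have rank 3, (batch_size, num_choices, sequence_length), where the second axis indexes the candidate continuations scored by the multiple-choice head. A toy illustration of the shape being checked (sizes are arbitrary):

    import tensorflow as tf

    # 2 examples, 3 candidate choices each, 5 tokens per candidate.
    batch_size, num_choices, seq_length = 2, 3, 5
    input_ids = tf.random.uniform((batch_size, num_choices, seq_length),
                                  maxval=100, dtype=tf.int32)

    # Mirrors the check added in the hunk above.
    assert len(input_ids.shape) == 3, "Inputs should have 3 dimensions: batch, choices, sequence length"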
pytorch_transformers/modeling_tf_utils.py

@@ -313,7 +313,7 @@ class TFSequenceSummary(tf.keras.layers.Layer):
             # We can probably just use the multi-head attention module of PyTorch >=1.1.0
             raise NotImplementedError
 
-        self.summary = tf.keras.layers.Identity(name='summary')
+        self.summary = None
         if hasattr(config, 'summary_use_proj') and config.summary_use_proj:
             if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0:
                 num_classes = config.num_labels

@@ -372,7 +372,8 @@ class TFSequenceSummary(tf.keras.layers.Layer):
         if training and self.first_dropout is not None:
             output = self.first_dropout(output)
 
-        output = self.summary(output)
+        if self.summary is not None:
+            output = self.summary(output)
 
         if self.activation is not None:
             output = self.activation(output)
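Instead of a placeholder tf.keras.layers.Identity projection, TFSequenceSummary now stores None and guards the call in the forward pass, presumably because a standalone Identity layer is not part of the TF 2.0-era Keras API. A minimal sketch of this optional-sublayer pattern (an illustrative layer, not the library's class):

    import tensorflow as tf

    class OptionalProjection(tf.keras.layers.Layer):
        """Illustrative: only project when a label count is configured."""

        def __init__(self, num_labels=0, **kwargs):
            super(OptionalProjection, self).__init__(**kwargs)
            # Store None rather than an identity layer when no projection is needed.
            self.summary = (tf.keras.layers.Dense(num_labels, name='summary')
                            if num_labels > 0 else None)

        def call(self, hidden_states):
            output = hidden_states
            if self.summary is not None:   # same guard as in the diff above
                output = self.summary(output)
            return output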
pytorch_transformers/modeling_xlnet.py

@@ -525,8 +525,10 @@ XLNET_INPUTS_DOCSTRING = r"""
             Only used during pretraining for partial prediction or for sequential decoding (generation).
         **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
             A parallel sequence of tokens (can be used to indicate various portions of the inputs).
-            The embeddings from these tokens will be summed with the respective token embeddings.
-            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
+            The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and
+            the important thing is that they should be different for tokens which belong to different segments.
+            The model will compute relative segment differences from the given type indices:
+            0 if the segment id of two tokens are the same, 1 if not.
         **input_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
             Mask to avoid performing attention on padding token indices.
             Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
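The rewritten docstring makes the point that XLNet's token_type_ids are not vocabulary indices: only whether two tokens share an id matters, since the model encodes relative segment differences (0 for same segment, 1 otherwise). For illustration, these two encodings are equivalent:

    import torch

    # Both tensors mark tokens 0-2 as one segment and tokens 3-4 as another.
    # Because only pairwise equality of the ids matters, they are equivalent inputs.
    token_type_ids_a = torch.tensor([[0, 0, 0, 1, 1]])
    token_type_ids_b = torch.tensor([[7, 7, 7, 3, 3]])   # arbitrary ids, same grouping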
pytorch_transformers/tests/modeling_auto_test.py

@@ -21,7 +21,9 @@ import shutil
 import pytest
 import logging
 
-try:
+from pytorch_transformers import is_torch_available
+
+if is_torch_available():
     from pytorch_transformers import (AutoConfig, BertConfig,
                                       AutoModel, BertModel,
                                       AutoModelWithLMHead, BertForMaskedLM,

@@ -31,7 +33,7 @@ try:
     from .modeling_common_test import (CommonTestCases, ids_tensor)
     from .configuration_common_test import ConfigTester
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
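This is the import fix named in the commit title, and the same edit is repeated in the test files below: a bare try/except ImportError also swallows ImportErrors raised by real bugs inside pytorch_transformers, silently turning failures into skips. Checking is_torch_available() (or is_tf_available()) up front keeps the skip behaviour while letting genuine import errors surface. The resulting module skeleton looks roughly like this (BertConfig/BertModel stand in for whatever each test file actually imports):

    import pytest

    from pytorch_transformers import is_torch_available

    if is_torch_available():
        # Torch-dependent imports run only when torch is installed, so any
        # ImportError raised here is a real bug and fails loudly.
        from pytorch_transformers import BertConfig, BertModel
    else:
        # Torch is missing: skip every test in this module.
        pytestmark = pytest.mark.skip("Require Torch")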
pytorch_transformers/tests/modeling_bert_test.py

@@ -25,13 +25,13 @@ from pytorch_transformers import is_torch_available
 from .modeling_common_test import (CommonTestCases, ids_tensor)
 from .configuration_common_test import ConfigTester
 
-try:
+if is_torch_available():
     from pytorch_transformers import (BertConfig, BertModel, BertForMaskedLM,
                                       BertForNextSentencePrediction, BertForPreTraining,
                                       BertForQuestionAnswering, BertForSequenceClassification,
                                       BertForTokenClassification, BertForMultipleChoice)
     from pytorch_transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
pytorch_transformers/tests/modeling_common_test.py

@@ -27,13 +27,15 @@ import unittest
 import logging
 import pytest
 
-try:
+from pytorch_transformers import is_torch_available
+
+if is_torch_available():
     import torch
 
     from pytorch_transformers import (PretrainedConfig, PreTrainedModel,
                                       BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
                                       GPT2LMHeadModel, GPT2Config, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
pytorch_transformers/tests/modeling_distilbert_test.py

@@ -21,10 +21,10 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     from pytorch_transformers import (DistilBertConfig, DistilBertModel, DistilBertForMaskedLM,
                                       DistilBertForQuestionAnswering, DistilBertForSequenceClassification)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_gpt2_test.py

@@ -22,10 +22,10 @@ import shutil
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     from pytorch_transformers import (GPT2Config, GPT2Model, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
                                       GPT2LMHeadModel, GPT2DoubleHeadsModel)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_openai_test.py

@@ -22,10 +22,10 @@ import shutil
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     from pytorch_transformers import (OpenAIGPTConfig, OpenAIGPTModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
                                       OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_roberta_test.py

@@ -22,11 +22,11 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     import torch
     from pytorch_transformers import (RobertaConfig, RobertaModel, RobertaForMaskedLM,
                                       RobertaForSequenceClassification)
     from pytorch_transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_tf_auto_test.py

@@ -21,7 +21,9 @@ import shutil
 import pytest
 import logging
 
-try:
+from pytorch_transformers import is_tf_available
+
+if is_tf_available():
     from pytorch_transformers import (AutoConfig, BertConfig,
                                       TFAutoModel, TFBertModel,
                                       TFAutoModelWithLMHead, TFBertForMaskedLM,

@@ -31,7 +33,7 @@ try:
     from .modeling_common_test import (CommonTestCases, ids_tensor)
     from .configuration_common_test import ConfigTester
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require TensorFlow")
pytorch_transformers/tests/modeling_tf_bert_test.py

@@ -26,7 +26,7 @@ from .configuration_common_test import ConfigTester
 from pytorch_transformers import BertConfig, is_tf_available
 
-try:
+if is_tf_available():
     import tensorflow as tf
 
     from pytorch_transformers.modeling_tf_bert import (TFBertModel, TFBertForMaskedLM,
                                                        TFBertForNextSentencePrediction,

@@ -36,7 +36,7 @@ try:
                                                        TFBertForTokenClassification,
                                                        TFBertForQuestionAnswering,
                                                        TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require TensorFlow")
pytorch_transformers/tests/modeling_tf_common_test.py

@@ -25,11 +25,13 @@ import uuid
 import pytest
 import sys
 
-try:
+from pytorch_transformers import is_tf_available
+
+if is_tf_available():
     import tensorflow as tf
     from pytorch_transformers import TFPreTrainedModel
     # from pytorch_transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require TensorFlow")
pytorch_transformers/tests/modeling_tf_gpt2_test.py

@@ -26,19 +26,20 @@ from .configuration_common_test import ConfigTester
 from pytorch_transformers import GPT2Config, is_tf_available
 
-try:
+if is_tf_available():
     import tensorflow as tf
 
     from pytorch_transformers.modeling_tf_gpt2 import (TFGPT2Model, TFGPT2LMHeadModel,
                                                        TFGPT2DoubleHeadsModel,
                                                        TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require TensorFlow")
 
 
 class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester):
 
-    all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel,
-                         TFGPT2DoubleHeadsModel) if is_tf_available() else ()
+    # all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel,
+    #                      TFGPT2DoubleHeadsModel) if is_tf_available() else ()
+    all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel) if is_tf_available() else ()
 
     class TFGPT2ModelTester(object):

@@ -186,7 +187,7 @@ class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester):
     def setUp(self):
         self.model_tester = TFGPT2ModelTest.TFGPT2ModelTester(self)
-        self.config_tester = ConfigTester(self, config_class=GPT2Config, hidden_size=37)
+        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
 
     def test_config(self):
         self.config_tester.run_common_tests()
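The second hunk is the "gpt2 config test" fix from the commit title: GPT2Config names its embedding width n_embd rather than hidden_size, so the old keyword did not exercise the small test-sized configuration the tester intended. Roughly, under the pytorch_transformers GPT2Config API of this release:

    from pytorch_transformers import GPT2Config

    # GPT-2 calls its hidden/embedding width `n_embd`, so this is the keyword
    # the ConfigTester must override to get a small test config.
    config = GPT2Config(n_embd=37)
    print(config.n_embd)   # 37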
pytorch_transformers/tests/modeling_transfo_xl_test.py

@@ -23,11 +23,11 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     import torch
     from pytorch_transformers import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel)
     from pytorch_transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_xlm_test.py

@@ -22,11 +22,11 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     from pytorch_transformers import (XLMConfig, XLMModel, XLMWithLMHeadModel,
                                       XLMForQuestionAnswering, XLMForSequenceClassification)
     from pytorch_transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/modeling_xlnet_test.py

@@ -25,12 +25,12 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     import torch
     from pytorch_transformers import (XLNetConfig, XLNetModel, XLNetLMHeadModel,
                                       XLNetForSequenceClassification, XLNetForQuestionAnswering)
     from pytorch_transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .modeling_common_test import (CommonTestCases, ids_tensor)
pytorch_transformers/tests/optimization_test.py

@@ -22,12 +22,12 @@ import pytest
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     import torch
 
     from pytorch_transformers import (AdamW, ConstantLRSchedule, WarmupConstantSchedule,
                                       WarmupCosineSchedule, WarmupCosineWithHardRestartsSchedule,
                                       WarmupLinearSchedule)
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 from .tokenization_tests_commons import TemporaryDirectory
pytorch_transformers/tests/tokenization_transfo_xl_test.py

@@ -21,10 +21,10 @@ from io import open
 from pytorch_transformers import is_torch_available
 
-try:
+if is_torch_available():
     import torch
     from pytorch_transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES
-except ImportError:
+else:
     pytestmark = pytest.mark.skip("Require Torch")
 
 # TODO: untangle Transfo-XL tokenizer from torch.load and torch.save
 from .tokenization_tests_commons import CommonTestCases