[ci] fix 3 remaining slow GPU failures (#4584)

Unverified commit b86e42e0, authored by Sam Shleifer on May 25, 2020; committed by GitHub on May 25, 2020.
Parent: 365d452d
Showing 4 changed files with 5 additions and 5 deletions:
src/transformers/configuration_distilbert.py (+2 −2)
src/transformers/modeling_encoder_decoder.py (+1 −1)
tests/test_modeling_bart.py (+1 −1)
tests/test_modeling_tf_electra.py (+1 −1)
src/transformers/configuration_distilbert.py

@@ -73,10 +73,10 @@ class DistilBertConfig(PretrainedConfig):
             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
         qa_dropout (:obj:`float`, optional, defaults to 0.1):
             The dropout probabilities used in the question answering model
-            :class:`~tranformers.DistilBertForQuestionAnswering`.
+            :class:`~transformers.DistilBertForQuestionAnswering`.
         seq_classif_dropout (:obj:`float`, optional, defaults to 0.2):
             The dropout probabilities used in the sequence classification model
-            :class:`~tranformers.DistilBertForSequenceClassification`.
+            :class:`~transformers.DistilBertForSequenceClassification`.

     Example::
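For context, the two corrected cross-references point at the dropout parameters that DistilBertConfig documents. A minimal sketch of passing them explicitly; the values are just the documented defaults:

    from transformers import DistilBertConfig

    # qa_dropout feeds DistilBertForQuestionAnswering, seq_classif_dropout
    # feeds DistilBertForSequenceClassification; both values below are the
    # documented defaults.
    config = DistilBertConfig(qa_dropout=0.1, seq_classif_dropout=0.2)
    print(config.qa_dropout, config.seq_classif_dropout)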
src/transformers/modeling_encoder_decoder.py

@@ -125,7 +125,7 @@ class EncoderDecoderModel(PreTrainedModel):

     Examples::

-        from tranformers import EncoderDecoder
+        from transformers import EncoderDecoder

         model = EncoderDecoder.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
     """
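This hunk only fixes the misspelled module name in the docstring example (tranformers → transformers). A runnable sketch of the same pattern, using the EncoderDecoderModel class named in the hunk header (the docstring's shorter EncoderDecoder alias is left as the commit shows it):

    from transformers import EncoderDecoderModel

    # Wire two pretrained BERT checkpoints together as encoder and decoder
    # ("Bert2Bert").
    model = EncoderDecoderModel.from_encoder_decoder_pretrained(
        "bert-base-uncased", "bert-base-uncased"
    )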
tests/test_modeling_bart.py

@@ -240,7 +240,7 @@ class BartTranslationTests(unittest.TestCase):
         with torch.no_grad():
             logits, *other_stuff = model(**self.net_input)
-        expected_slice = torch.tensor([9.0078, 10.1113, 14.4787])
+        expected_slice = torch.tensor([9.0078, 10.1113, 14.4787], device=torch_device)
         result_slice = logits[0][0][:3]
         self.assertTrue(torch.allclose(expected_slice, result_slice, atol=TOLERANCE))
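On a GPU runner the model's logits live on CUDA, while a bare torch.tensor(...) is created on the CPU, and torch.allclose raises when its arguments sit on different devices; pinning expected_slice to torch_device is what makes this slow GPU test pass. A minimal sketch of the failure mode and the fix, assuming a CUDA device is available:

    import torch

    if torch.cuda.is_available():
        on_gpu = torch.tensor([9.0078, 10.1113, 14.4787], device="cuda")
        on_cpu = torch.tensor([9.0078, 10.1113, 14.4787])
        # torch.allclose(on_gpu, on_cpu) raises a RuntimeError: both tensors
        # must be on the same device. Creating (or moving) the expected values
        # on the model's device avoids it.
        assert torch.allclose(on_gpu, on_cpu.to(on_gpu.device))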
tests/test_modeling_tf_electra.py

@@ -222,6 +222,6 @@ class TFElectraModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
         # for model_name in list(TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
-        for model_name in ["electra-small-discriminator"]:
+        for model_name in ["google/electra-small-discriminator"]:
             model = TFElectraModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
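The ELECTRA checkpoints are published under the google/ organization on the model hub, so the bare short name no longer resolves and from_pretrained needs the fully qualified identifier. A one-line sketch of the corrected lookup:

    from transformers import TFElectraModel

    # Load with the namespaced hub id; the un-prefixed name fails to resolve.
    model = TFElectraModel.from_pretrained("google/electra-small-discriminator")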