chenpangpang / transformers

Commit f1fe1846 (unverified)
Authored Jun 05, 2020 by Sylvain Gugger, committed by GitHub on Jun 05, 2020
Parent: 5c0cfc2c

Use labels to remove deprecation warnings (#4807)
Showing 10 changed files with 17 additions and 17 deletions (+17 / -17):
tests/test_modeling_albert.py      +2 / -2
tests/test_modeling_bart.py        +3 / -3
tests/test_modeling_bert.py        +4 / -4
tests/test_modeling_distilbert.py  +1 / -1
tests/test_modeling_electra.py     +1 / -1
tests/test_modeling_gpt2.py        +1 / -1
tests/test_modeling_longformer.py  +2 / -2
tests/test_modeling_openai.py      +1 / -1
tests/test_modeling_roberta.py     +1 / -1
tests/test_modeling_t5.py          +1 / -1
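Every hunk below makes the same one-keyword change: the deprecated masked_lm_labels / lm_labels arguments used in the test suite are replaced with labels, so running the tests no longer emits deprecation warnings. As a rough before/after sketch of the call pattern (the tiny config and random inputs here are made up for illustration and are not part of this commit):

import torch
from transformers import BertConfig, BertForMaskedLM

# Tiny randomly initialized model, chosen only to show the keyword change.
config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=64)
model = BertForMaskedLM(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))

# Deprecated spelling the tests used before this commit (emitted a warning):
#   outputs = model(input_ids, masked_lm_labels=input_ids)

# Spelling the tests use after this commit:
outputs = model(input_ids, labels=input_ids)
loss = outputs[0]  # first element is the masked-LM loss when labels are provided
print(loss.item())

The computed loss is the same either way; only the keyword spelling changes.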
tests/test_modeling_albert.py

@@ -162,7 +162,7 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             sentence_order_label=sequence_labels,
         )
         result = {
@@ -183,7 +183,7 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
tests/test_modeling_bart.py

@@ -296,7 +296,7 @@ class BartTranslationTests(unittest.TestCase):
         lm_model = BartForConditionalGeneration(config).to(torch_device)
         context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
         summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
         expected_shape = (*summary.shape, config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)
@@ -361,7 +361,7 @@ class BartHeadTests(unittest.TestCase):
         lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
         lm_model = BartForConditionalGeneration(config)
         lm_model.to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=input_ids, lm_labels=lm_labels)
+        loss, logits, enc_features = lm_model(input_ids=input_ids, labels=lm_labels)
         expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)
         self.assertIsInstance(loss.item(), float)
@@ -381,7 +381,7 @@ class BartHeadTests(unittest.TestCase):
         lm_model = BartForConditionalGeneration(config).to(torch_device)
         context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
         summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
         expected_shape = (*summary.shape, config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)
tests/test_modeling_bert.py

@@ -218,7 +218,7 @@ class BertModelTester:
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
@@ -248,7 +248,7 @@ class BertModelTester:
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             encoder_hidden_states=encoder_hidden_states,
             encoder_attention_mask=encoder_attention_mask,
         )
@@ -256,7 +256,7 @@ class BertModelTester:
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             encoder_hidden_states=encoder_hidden_states,
         )
         result = {
@@ -294,7 +294,7 @@ class BertModelTester:
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             next_sentence_label=sequence_labels,
         )
         result = {
tests/test_modeling_distilbert.py

@@ -151,7 +151,7 @@ class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
         model = DistilBertForMaskedLM(config=config)
         model.to(torch_device)
         model.eval()
-        loss, prediction_scores = model(input_ids, attention_mask=input_mask, masked_lm_labels=token_labels)
+        loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=token_labels)
         result = {
             "loss": loss,
             "prediction_scores": prediction_scores,
tests/test_modeling_electra.py

@@ -180,7 +180,7 @@ class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
tests/test_modeling_gpt2.py

@@ -268,7 +268,7 @@ class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
             "mc_token_ids": mc_token_ids,
             "attention_mask": multiple_choice_input_mask,
             "token_type_ids": multiple_choice_token_type_ids,
-            "lm_labels": multiple_choice_inputs_ids,
+            "labels": multiple_choice_inputs_ids,
         }
         loss, lm_logits, mc_logits, _ = model(**inputs)
tests/test_modeling_longformer.py

@@ -164,7 +164,7 @@ class LongformerModelTester(object):
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
@@ -361,7 +361,7 @@ class LongformerModelIntegrationTest(unittest.TestCase):
             [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
         )
         # long input
-        loss, prediction_scores = model(input_ids, masked_lm_labels=input_ids)
+        loss, prediction_scores = model(input_ids, labels=input_ids)
         expected_loss = torch.tensor(0.0620, device=torch_device)
         expected_prediction_scores_sum = torch.tensor(-6.1599e08, device=torch_device)
tests/test_modeling_openai.py

@@ -169,7 +169,7 @@ class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
         model.to(torch_device)
         model.eval()
-        loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, lm_labels=input_ids)
+        loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
         result = {"loss": loss, "lm_logits": lm_logits}
tests/test_modeling_roberta.py

@@ -155,7 +155,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
tests/test_modeling_t5.py

@@ -206,7 +206,7 @@ class T5ModelTest(ModelTesterMixin, unittest.TestCase):
             input_ids=input_ids,
             decoder_input_ids=decoder_input_ids,
             decoder_attention_mask=decoder_attention_mask,
-            lm_labels=lm_labels,
+            labels=lm_labels,
         )
         loss, prediction_scores, _, _ = outputs
         self.parent.assertEqual(len(outputs), 4)
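For context, the warnings these tests were triggering typically come from a backward-compatibility shim in a model's forward method that still accepts the old keyword, warns, and forwards the value to labels. The following is a hypothetical, self-contained sketch of that pattern; it is not the transformers implementation and none of its names come from this commit:

import warnings
import torch

class ToyMaskedLM(torch.nn.Module):
    """Hypothetical model illustrating a deprecation shim; not transformers code."""

    def __init__(self, vocab_size=100, hidden=16):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, hidden)
        self.head = torch.nn.Linear(hidden, vocab_size)

    def forward(self, input_ids, labels=None, masked_lm_labels=None):
        if masked_lm_labels is not None:
            # Old keyword still works, but callers are nudged toward `labels`.
            warnings.warn(
                "`masked_lm_labels` is deprecated, use `labels` instead.", FutureWarning
            )
            labels = masked_lm_labels
        logits = self.head(self.embed(input_ids))
        if labels is None:
            return (logits,)
        loss = torch.nn.functional.cross_entropy(
            logits.view(-1, logits.size(-1)), labels.view(-1)
        )
        return (loss, logits)

model = ToyMaskedLM()
ids = torch.randint(0, 100, (1, 8))
loss, logits = model(ids, labels=ids)            # new keyword: no warning
loss, logits = model(ids, masked_lm_labels=ids)  # old keyword: emits FutureWarning

Passing labels directly, as this commit does throughout the test suite, silences the warning without changing the computed loss.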