Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
7cb1fdd4
Unverified
Commit
7cb1fdd4
authored
Dec 11, 2021
by
Nicolas Patry
Committed by
GitHub
Dec 10, 2021
Browse files
Fixing tests for perceiver (texts) (#14719)
* Fixing tests for perceiver (texts) * For MaskedLM
parent
39fbb068
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
23 additions
and
0 deletions
+23
-0
src/transformers/models/perceiver/modeling_perceiver.py
src/transformers/models/perceiver/modeling_perceiver.py
+16
-0
tests/test_modeling_perceiver.py
tests/test_modeling_perceiver.py
+7
-0
No files found.
src/transformers/models/perceiver/modeling_perceiver.py
View file @
7cb1fdd4
...
@@ -915,6 +915,7 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel):
...
@@ -915,6 +915,7 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel):
output_hidden_states
=
None
,
output_hidden_states
=
None
,
labels
=
None
,
labels
=
None
,
return_dict
=
None
,
return_dict
=
None
,
input_ids
=
None
,
):
):
r
"""
r
"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
...
@@ -922,6 +923,10 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel):
...
@@ -922,6 +923,10 @@ class PerceiverForMaskedLM(PerceiverPreTrainedModel):
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
"""
if
inputs
is
not
None
and
input_ids
is
not
None
:
raise
ValueError
(
"You cannot use both `inputs` and `input_ids`"
)
elif
inputs
is
None
and
input_ids
is
not
None
:
inputs
=
input_ids
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
...
@@ -994,6 +999,7 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
...
@@ -994,6 +999,7 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
output_hidden_states
=
None
,
output_hidden_states
=
None
,
labels
=
None
,
labels
=
None
,
return_dict
=
None
,
return_dict
=
None
,
input_ids
=
None
,
):
):
r
"""
r
"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
...
@@ -1015,6 +1021,10 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
...
@@ -1015,6 +1021,10 @@ class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
>>> outputs = model(inputs=inputs)
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> logits = outputs.logits
"""
"""
if
inputs
is
not
None
and
input_ids
is
not
None
:
raise
ValueError
(
"You cannot use both `inputs` and `input_ids`"
)
elif
inputs
is
None
and
input_ids
is
not
None
:
inputs
=
input_ids
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
...
@@ -1121,6 +1131,7 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
...
@@ -1121,6 +1131,7 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
output_hidden_states
=
None
,
output_hidden_states
=
None
,
labels
=
None
,
labels
=
None
,
return_dict
=
None
,
return_dict
=
None
,
pixel_values
=
None
,
):
):
r
"""
r
"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
...
@@ -1149,6 +1160,11 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
...
@@ -1149,6 +1160,11 @@ class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
>>> predicted_class_idx = logits.argmax(-1).item()
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
"""
"""
if
inputs
is
not
None
and
pixel_values
is
not
None
:
raise
ValueError
(
"You cannot use both `inputs` and `pixel_values`"
)
elif
inputs
is
None
and
pixel_values
is
not
None
:
inputs
=
pixel_values
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
return_dict
=
return_dict
if
return_dict
is
not
None
else
self
.
config
.
use_return_dict
outputs
=
self
.
perceiver
(
outputs
=
self
.
perceiver
(
...
...
tests/test_modeling_perceiver.py
View file @
7cb1fdd4
...
@@ -196,6 +196,13 @@ class PerceiverModelTester:
...
@@ -196,6 +196,13 @@ class PerceiverModelTester:
num_labels
=
self
.
num_labels
,
num_labels
=
self
.
num_labels
,
)
)
def get_pipeline_config(self):
    """Build a model config adjusted for pipeline tests.

    Starts from the tester's default config and overrides the vocabulary
    to byte-level size and shrinks the maximum position embeddings.
    """
    cfg = self.get_config()
    # Byte level vocab
    cfg.vocab_size = 261
    cfg.max_position_embeddings = 40
    return cfg
def
create_and_check_for_masked_lm
(
self
,
config
,
inputs
,
input_mask
,
sequence_labels
,
token_labels
):
def
create_and_check_for_masked_lm
(
self
,
config
,
inputs
,
input_mask
,
sequence_labels
,
token_labels
):
model
=
PerceiverForMaskedLM
(
config
=
config
)
model
=
PerceiverForMaskedLM
(
config
=
config
)
model
.
to
(
torch_device
)
model
.
to
(
torch_device
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment