chenpangpang / transformers · Commits · 44749001

Unverified commit 44749001, authored Jun 24, 2022 by Yih-Dar, committed via GitHub on Jun 24, 2022.
Fix Splinter test (#17854)

* fix

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>

parent 73a0496c
Showing 1 changed file with 37 additions and 1 deletion.

tests/models/splinter/test_modeling_splinter.py (+37, -1), view file @ 44749001
@@ -18,7 +18,7 @@ import copy
 import unittest

 from transformers import is_torch_available
-from transformers.testing_utils import require_torch, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
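The only change in this hunk is the additional `require_torch_multi_gpu` import from `transformers.testing_utils`. As a rough sketch of what that decorator is assumed to do (an illustration, not the library's actual implementation), it skips the decorated test unless at least two CUDA devices are visible:

# Hypothetical stand-in for transformers.testing_utils.require_torch_multi_gpu,
# shown only to illustrate the assumed behavior of the newly imported decorator.
import unittest

import torch


def require_torch_multi_gpu_sketch(test_case):
    # Skip the test unless at least two CUDA devices are available.
    if torch.cuda.device_count() < 2:
        return unittest.skip("test requires multiple GPUs")(test_case)
    return test_case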
@@ -316,6 +316,42 @@ class SplinterModelTest(ModelTesterMixin, unittest.TestCase):
             model = SplinterModel.from_pretrained(model_name)
             self.assertIsNotNone(model)

+    # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs.
+    # When the batch is distributed to multiple devices, each replica could get different values for the maximal number
+    # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different
+    # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output.
+    @require_torch_multi_gpu
+    def test_multi_gpu_data_parallel_forward(self):
+        from torch import nn
+
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+
+        # some params shouldn't be scattered by nn.DataParallel
+        # so just remove them if they are present.
+        blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
+        for k in blacklist_non_batched_params:
+            inputs_dict.pop(k, None)
+
+        # move input tensors to cuda:0
+        for k, v in inputs_dict.items():
+            if torch.is_tensor(v):
+                inputs_dict[k] = v.to(0)
+
+        for model_class in self.all_model_classes:
+            # Skip this case since it will fail sometimes, as described above.
+            if model_class == SplinterForPreTraining:
+                continue
+
+            model = model_class(config=config)
+            model.to(0)
+            model.eval()
+
+            # Wrap model in nn.DataParallel
+            model = nn.DataParallel(model)
+            with torch.no_grad():
+                _ = model(**self._prepare_for_class(inputs_dict, model_class))
+

 @require_torch
 class SplinterModelIntegrationTest(unittest.TestCase):
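The four-line comment at the top of the new test is the heart of the fix: under `nn.DataParallel`, each replica can end up with a different maximal number of question tokens, so the per-replica outputs of `SplinterForPreTraining` differ along dimension 1 (`num_questions`) and cannot be gathered back into a single tensor. A minimal sketch of that constraint, with made-up shapes and using the concatenation that the gather step effectively performs:

# Illustration only (the shapes are assumptions, not taken from the commit):
# DataParallel gathers per-replica outputs by concatenating along the batch
# dimension (dim 0), which requires every other dimension to match. If one
# replica builds 2 question positions and another builds 3, dim 1 differs
# and the gather cannot produce a single output tensor.
import torch

replica_outputs = [
    torch.zeros(4, 2, 8),  # replica 0: batch 4, num_questions 2, hidden 8
    torch.zeros(4, 3, 8),  # replica 1: batch 4, num_questions 3, hidden 8
]

try:
    torch.cat(replica_outputs, dim=0)  # roughly what the gather step does
except RuntimeError as err:
    print(f"cannot combine replica outputs: {err}")

That mismatch is why the loop in the new test simply skips `SplinterForPreTraining` rather than trying to pad or reconcile the shapes. Assuming a machine with two or more GPUs, the test can be selected with a standard pytest invocation such as `python -m pytest tests/models/splinter/test_modeling_splinter.py -k test_multi_gpu_data_parallel_forward` (an example command, not something the commit specifies).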