chenpangpang/transformers, commit 02bdfc02 (unverified)
Authored Nov 10, 2020 by Stas Bekman; committed by GitHub on Nov 10, 2020
using multi_gpu consistently (#8446)
* s|multiple_gpu|multi_gpu|g; s|multigpu|multi_gpu|g' * doc
parent b9356945
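The commit body above records the two substitutions that produced the rename (s|multiple_gpu|multi_gpu|g and s|multigpu|multi_gpu|g, applied across the source and doc trees). As a rough sketch only, and not the author's actual command, the same bulk rename could be expressed in Python as below; the directory list and file-extension filter are assumptions for illustration.

# Illustrative sketch of the bulk rename described in the commit message.
# The directories and extensions below are assumptions, not taken from the commit.
import re
from pathlib import Path

SUBSTITUTIONS = [
    (re.compile(r"multiple_gpu"), "multi_gpu"),
    (re.compile(r"multigpu"), "multi_gpu"),
]


def rename_in_tree(root: str) -> None:
    """Apply the substitutions to every matching text file under ``root``."""
    for path in Path(root).rglob("*"):
        if not path.is_file() or path.suffix not in {".py", ".rst", ".md"}:
            continue
        original = path.read_text(encoding="utf-8")
        updated = original
        for pattern, replacement in SUBSTITUTIONS:
            updated = pattern.sub(replacement, updated)
        if updated != original:
            path.write_text(updated, encoding="utf-8")


if __name__ == "__main__":
    # Hypothetical tree layout; adjust to the repository being renamed.
    for tree in ("src", "tests", "examples", "docs"):
        rename_in_tree(tree)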
Changes: 22

Showing 2 changed files with 5 additions and 5 deletions (+5, -5):

  tests/test_modeling_transfo_xl.py   +3, -3
  tests/test_trainer_distributed.py   +2, -2
tests/test_modeling_transfo_xl.py (view file @ 02bdfc02)

@@ -17,7 +17,7 @@ import random
 import unittest
 
 from transformers import is_torch_available
-from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device
 
 from .test_configuration_common import ConfigTester
 from .test_generation_utils import GenerationTesterMixin

@@ -205,8 +205,8 @@ class TransfoXLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
         output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs)
         self.model_tester.check_transfo_xl_lm_head_output(output_result)
 
-    @require_torch_multigpu
-    def test_multigpu_data_parallel_forward(self):
+    @require_torch_multi_gpu
+    def test_multi_gpu_data_parallel_forward(self):
         # Opt-out of this test.
         pass
tests/test_trainer_distributed.py (view file @ 02bdfc02)

@@ -2,7 +2,7 @@ import sys
 from typing import Dict
 
 from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
-from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multigpu
+from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu
 from transformers.utils import logging

@@ -44,7 +44,7 @@ if is_torch_available():
 
 class TestTrainerDistributed(TestCasePlus):
-    @require_torch_multigpu
+    @require_torch_multi_gpu
     def test_trainer(self):
 
         distributed_args = f"""
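Both files only swap the old require_torch_multigpu name for require_torch_multi_gpu; what the decorator does is unchanged. For context, a skip-unless-multi-GPU test decorator of this kind can be sketched as follows. This is an approximation of the idea under a hypothetical name, not the actual code in transformers.testing_utils.

# Sketch of a "skip unless at least two GPUs are visible" decorator,
# approximating the idea behind require_torch_multi_gpu. The name and body
# are illustrative; the real implementation lives in transformers.testing_utils.
import unittest


def require_multi_gpu(test_case):
    try:
        import torch
    except ImportError:
        return unittest.skip("test requires PyTorch")(test_case)
    if torch.cuda.device_count() < 2:
        return unittest.skip("test requires multiple GPUs")(test_case)
    return test_case


class ExampleMultiGpuTest(unittest.TestCase):
    @require_multi_gpu
    def test_data_parallel_forward(self):
        # Only runs on machines where torch sees two or more CUDA devices.
        self.assertTrue(True)

On a single-GPU or CPU-only runner the decorated tests are reported as skipped rather than failed, which is why the rename in this commit is purely cosmetic.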