Unverified commit 9edff453 authored by Yih-Dar, committed by GitHub

skip some test_multi_gpu_data_parallel_forward (#18188)


Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent bc6fe6fb
@@ -23,7 +23,7 @@ from packaging import version
 
 from transformers import BeitConfig
 from transformers.models.auto import get_values
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
 
 from ...test_configuration_common import ConfigTester
@@ -212,6 +212,11 @@ class BeitModelTest(ModelTesterMixin, unittest.TestCase):
     def test_inputs_embeds(self):
         pass
 
+    @require_torch_multi_gpu
+    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
+    def test_multi_gpu_data_parallel_forward(self):
+        pass
+
     def test_model_common_attributes(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
...
@@ -20,7 +20,7 @@ import unittest
 
 from transformers import Data2VecVisionConfig
 from transformers.models.auto import get_values
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
 
 from ...test_configuration_common import ConfigTester
@@ -194,6 +194,13 @@ class Data2VecVisionModelTest(ModelTesterMixin, unittest.TestCase):
         # Data2VecVision does not use inputs_embeds
         pass
 
+    @require_torch_multi_gpu
+    @unittest.skip(
+        reason="Data2VecVision has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
+    )
+    def test_multi_gpu_data_parallel_forward(self):
+        pass
+
     def test_model_common_attributes(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
...
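
Both skip reasons point at the same pattern: these models register submodules with `add_module` while also keeping references to them in plain Python containers, which `nn.DataParallel`'s replication step does not copy. The sketch below is a minimal, hypothetical illustration of that failure mode, not the models' actual code; the `PoolingStack` class and its names are invented for the example. `replicate()` copies the registered submodules onto each GPU, but the plain list inside each replica still points at the original modules on the first device, so a replicated forward pass mixes devices.

```python
import torch
from torch import nn


class PoolingStack(nn.Module):
    """Hypothetical stand-in for the add_module pattern used in these models."""

    def __init__(self, channels: int, num_blocks: int) -> None:
        super().__init__()
        self.blocks = []  # plain Python list: invisible to DataParallel's replicate()
        for i in range(num_blocks):
            block = nn.Conv2d(channels, channels, kernel_size=1)
            self.add_module(str(i), block)  # registered, so it is copied to each GPU
            self.blocks.append(block)       # but forward() iterates this list instead

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        for block in self.blocks:
            hidden_state = block(hidden_state)
        return hidden_state


if __name__ == "__main__":
    model = PoolingStack(channels=8, num_blocks=2)
    inputs = torch.randn(4, 8, 16, 16)
    print(model(inputs).shape)  # fine on a single device: torch.Size([4, 8, 16, 16])

    if torch.cuda.device_count() > 1:
        # Each replica's self.blocks still references the modules on the first GPU,
        # so the scattered input chunks hit weights on the wrong device; that is
        # why test_multi_gpu_data_parallel_forward is skipped for these models.
        parallel = nn.DataParallel(model.cuda())
        parallel(inputs.cuda())
```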