Unverified Commit 775a0f06 authored by Min Xu's avatar Min Xu Committed by GitHub
Browse files

[test] disable a flaky test (#1020)


Co-authored-by: default avatarMin Xu <min.xu.public@gmail.com>
parent a5116ecd
...@@ -81,6 +81,10 @@ skip_if_py39_no_cuda = pytest.mark.skipif( ...@@ -81,6 +81,10 @@ skip_if_py39_no_cuda = pytest.mark.skipif(
reason="Python3.9 without CUDA is skipped", reason="Python3.9 without CUDA is skipped",
) )
skip_due_to_flakyness = pytest.mark.skip(
reason="Flaky test to be fixed or removed",
)
available_devices = ["cpu"] available_devices = ["cpu"]
if torch.cuda.is_available(): if torch.cuda.is_available():
available_devices.append("cuda") available_devices.append("cuda")
......
...@@ -20,7 +20,7 @@ import torch.distributed.rpc as rpc ...@@ -20,7 +20,7 @@ import torch.distributed.rpc as rpc
import torch.multiprocessing as mp import torch.multiprocessing as mp
import torch.nn as nn import torch.nn as nn
from fair_dev.testing.testing import skip_if_single_gpu from fair_dev.testing.testing import skip_due_to_flakyness, skip_if_single_gpu
from fairscale.experimental.nn.distributed_pipeline import DistributedLoss, DistributedPipeline, PipelineModulesGraph from fairscale.experimental.nn.distributed_pipeline import DistributedLoss, DistributedPipeline, PipelineModulesGraph
from fairscale.internal import torch_version from fairscale.internal import torch_version
...@@ -113,6 +113,7 @@ def create_multiple_layers(): ...@@ -113,6 +113,7 @@ def create_multiple_layers():
@rpc_test(world_size=2) @rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES) @pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu @skip_if_single_gpu
@skip_due_to_flakyness
def create_multiple_workers(devices): def create_multiple_workers(devices):
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})] model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=1, devices=devices[:2]) pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=1, devices=devices[:2])
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment