Unverified Commit 011c0c41 authored by Benjamin Lefaudeux's avatar Benjamin Lefaudeux Committed by GitHub
Browse files

[chore] disheartening switch off of a OSS cpu test (#356)

* precise skip, only if agent has only cpu
parent 4401ced9
...@@ -30,7 +30,7 @@ jobs: ...@@ -30,7 +30,7 @@ jobs:
run: | run: |
python -m cibuildwheel --output-dir dist python -m cibuildwheel --output-dir dist
env: env:
CIBW_BUILD: "cp36-*64 cp37-*64 cp38-*64 cp39-*64" CIBW_BUILD: "cp37-*64 cp38-*64 cp39-*64"
CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
CIBW_BEFORE_BUILD: pip install . CIBW_BEFORE_BUILD: pip install .
......
...@@ -58,6 +58,11 @@ skip_if_py38 = pytest.mark.skipif( ...@@ -58,6 +58,11 @@ skip_if_py38 = pytest.mark.skipif(
sys.version_info.major == 3 and sys.version_info.minor == 8, reason="Python3.8 is skipped" sys.version_info.major == 3 and sys.version_info.minor == 8, reason="Python3.8 is skipped"
) )
# Pytest skip marker: skips the test when running under CPython 3.9 with no
# CUDA device available (per the commit message, the agent is CPU-only in
# that CI configuration).
skip_if_py39_no_cuda = pytest.mark.skipif(
    not torch.cuda.is_available() and sys.version_info.major == 3 and sys.version_info.minor == 9,
    reason="Python3.9 wo CUDA is skipped",
)
_, filename_mpi = tempfile.mkstemp() _, filename_mpi = tempfile.mkstemp()
......
...@@ -22,7 +22,7 @@ import torch.multiprocessing as mp ...@@ -22,7 +22,7 @@ import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP
import fairscale.optim as optim import fairscale.optim as optim
from fairscale.utils.testing import skip_if_no_cuda, skip_if_single_gpu from fairscale.utils.testing import skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu") DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
...@@ -564,6 +564,7 @@ def run_test_multiple_groups(rank, world_size, tempfile_name): ...@@ -564,6 +564,7 @@ def run_test_multiple_groups(rank, world_size, tempfile_name):
dist.destroy_process_group(process_group) dist.destroy_process_group(process_group)
@skip_if_py39_no_cuda
def test_multiple_groups(): def test_multiple_groups():
world_size = 6 world_size = 6
temp_file_name = tempfile.mkstemp()[1] temp_file_name = tempfile.mkstemp()[1]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment