Unverified Commit 11a24161 authored by anj-s's avatar anj-s Committed by GitHub
Browse files

[chore] Update the PyTorch version that we run CPU tests with (#809)

* update python version for cpu tests

* run CPU tests with updated PyTorch version

* update nightly and test PyTorch versions

* skip failing multiprocess pipe test

* always skip test

* always skip test

* always skip test

* lint error

* skip unsupported versions

* improve skip message

* lint errors
parent ce2ad89e
......@@ -116,11 +116,11 @@ install_dep_pytorch_nightly: &install_dep_pytorch_nightly
# check if we have restored venv cache (/home/circleci/venv) correctly, if so, just skip
if [ -f /home/circleci/venv/check_version.py ]; then python /home/circleci/venv/check_version.py torch eq 1.10 && exit 0; fi
# start installing
pip install --progress-bar off --pre torch==1.10.0.dev20210901+cu111 torchvision==0.11.0.dev20210901+cu111 -f https://download.pytorch.org/whl/nightly/cu111/torch_nightly.html
pip install --progress-bar off --pre torch==1.11.0.dev20211018+cu111 torchvision==0.12.0.dev20211018+cu111 -f https://download.pytorch.org/whl/nightly/cu111/torch_nightly.html
pip install --progress-bar off -r requirements-test.txt
pip install --progress-bar off -r requirements-benchmarks.txt
python -c 'import torch; print("Torch version:", torch.__version__)'
python -c 'import torch; assert torch.__version__.split(".")[:2] == ["1", "10"], "wrong torch version"'
python -c 'import torch; assert torch.__version__.split(".")[:2] == ["1", "11"], "wrong torch version"'
python -m torch.utils.collect_env
wget -O /home/circleci/venv/check_version.py https://raw.githubusercontent.com/min-xu-ai/check_verion/main/check_version.py
......@@ -272,14 +272,14 @@ jobs:
# Cache the venv directory that contains dependencies
- restore_cache:
keys:
- cache-key-cpu-py37-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- cache-key-cpu-py37-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_dep_181
- <<: *install_dep_190
- save_cache:
paths:
- ~/venv
key: cache-key-cpu-py37-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
key: cache-key-cpu-py37-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_repo
......@@ -306,13 +306,13 @@ jobs:
# Cache the venv directory that contains dependencies
- restore_cache:
keys:
- cache-key-cpu-py38-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_dep_181
- cache-key-cpu-py38-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_dep_190
- save_cache:
paths:
- ~/venv
key: cache-key-cpu-py38-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
key: cache-key-cpu-py38-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_repo
......@@ -339,14 +339,14 @@ jobs:
# Cache the venv directory that contains dependencies
- restore_cache:
keys:
- cache-key-cpu-py39-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- cache-key-cpu-py39-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_dep_181
- <<: *install_dep_190
- save_cache:
paths:
- ~/venv
key: cache-key-cpu-py39-181-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
key: cache-key-cpu-py39-190-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_repo
......@@ -511,14 +511,14 @@ jobs:
# Cache the venv directory that contains dependencies
- restore_cache:
keys:
- cache-key-py38-gpu-pytorch-nightly-111-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- cache-key-py38-gpu-pytorch-nightly-112-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_dep_pytorch_nightly
- save_cache:
paths:
- ~/venv
key: cache-key-py38-gpu-pytorch-nightly-111-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
key: cache-key-py38-gpu-pytorch-nightly-112-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}
- <<: *install_repo
......
......@@ -151,7 +151,7 @@ At a high level, we want ML researchers to:
## Testing
We use circleci to test on PyTorch versions 1.6.0, 1.7.1, and 1.8.1. Please create an [issue](https://github.com/facebookresearch/fairscale/issues) if you are having trouble with installation.
We use circleci to test on PyTorch versions 1.7.1, 1.8.1 and 1.9.0. Please create an [issue](https://github.com/facebookresearch/fairscale/issues) if you are having trouble with installation.
## Contributors
......
......@@ -6,7 +6,7 @@ from source using the instructions below.
### Requirements
* PyTorch>= 1.6.0
* PyTorch>= 1.7.1
### Installing the pip package (stable)
......
......@@ -23,6 +23,11 @@ import torch.nn as nn
from fairscale.experimental.nn.distributed_pipeline import DistributedLoss, DistributedPipeline, PipelineModulesGraph
from fairscale.utils import torch_version
pytestmark = pytest.mark.skipif(
not torch.cuda.is_available() or torch_version() < (1, 9, 0),
reason="CPU tests fail right now and all tests require torch version >= 1.9.0.",
)
CPU_DEVICES = ["worker0/cpu", "worker1/cpu"]
GPU_DEVICES = ["worker0/cuda:0", "worker1/cuda:1"]
if torch.cuda.is_available():
......@@ -31,9 +36,6 @@ else:
DEVICES = [CPU_DEVICES]
pytestmark = pytest.mark.skipif(torch_version() < (1, 9, 0), reason="requires torch version >= 1.9.0")
def rpc_worker(rank, world_size, init_file, func, *args):
options = rpc.TensorPipeRpcBackendOptions(init_method="file://" + init_file)
for i in range(world_size):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment