# Python CircleCI 2.0 configuration file
#
# Check https://circleci.com/docs/2.0/language-python/ for more details
#
# Adopted from
# https://github.com/facebookresearch/detectron2/blob/master/.circleci/config.yml

version: 2

# -------------------------------------------------------------------------------------
# Environments to run the jobs in
# -------------------------------------------------------------------------------------
cpu: &cpu
  docker:
    - image: circleci/python:3.7
  resource_class: medium

gpu: &gpu
  environment:
    CUDA_VERSION: "10.1"
  machine:
    image: ubuntu-1604-cuda-10.1:201909-23
  resource_class: gpu.large

# -------------------------------------------------------------------------------------
# Re-usable commands
# -------------------------------------------------------------------------------------
setup_venv: &setup_venv
  - run:
      name: Setup Virtual Env
      working_directory: ~/
      command: |
        python -m venv ~/venv
        echo ". ~/venv/bin/activate" >> $BASH_ENV
        . ~/venv/bin/activate
        python --version
        which python
        which pip
        pip install --upgrade pip

install_dep_15: &install_dep_15
  - run:
      name: Install Dependencies
      command: |
        pip install --progress-bar off torch==1.5.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
        pip install --progress-bar off -r requirements-test.txt
        python -c 'import torch; print("Torch version:", torch.__version__)'
        python -m torch.utils.collect_env

install_dep_16: &install_dep_16
  - run:
      name: Install Dependencies
      command: |
        pip install --progress-bar off torch==1.6.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
        pip install --progress-bar off -r requirements-test.txt
        python -c 'import torch; print("Torch version:", torch.__version__)'
        python -m torch.utils.collect_env

install_repo_cpu: &install_repo_cpu
  - run:
      name: Install Repository
      command: |
        python setup.py build develop

install_repo_gpu: &install_repo_gpu
  - run:
      name: Install Repository
      command: |
        export CUDA_HOME=/usr/local/cuda-10.1
        python setup.py build develop

run_coverage: &run_coverage
  - run:
      name: Run Unit Tests With Coverage
      command: |
        pytest --cov-report=xml --cov=./
        # Uploading test coverage for Python code
        bash <(curl -s https://codecov.io/bash) -f coverage.xml -cF Python

run_unittests: &run_unittests
  - run:
      name: Run Unit Tests
      command: |
        pytest --junitxml=test-results/junit.xml --verbose

run_flake8: &run_flake8
  - run:
      name: Run Linter (flake8)
      command: |
        flake8 --show-source --statistics

run_pipe_benchmark: &run_pipe_benchmark
  - run:
      name: Run Pipe Benchmark
      command: |
        python benchmarks/pipe.py

run_oss_benchmark: &run_oss_benchmark
  - run:
      name: Run OSS Benchmark
      command: |
        python benchmarks/oss.py

run_oss_ddp_benchmark: &run_oss_ddp_benchmark
  - run:
      name: Run OSS DDP Benchmark
      command: |
        python benchmarks/oss.py --oss_ddp

# -------------------------------------------------------------------------------------
# Jobs to run
# -------------------------------------------------------------------------------------
jobs:
  cpu_tests:
    <<: *cpu

    working_directory: ~/fairscale

    steps:
      - checkout
      - <<: *setup_venv

      # Cache the venv directory that contains dependencies
      - restore_cache:
          keys:
            - cache-key-cpu-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_dep_16

      - save_cache:
          paths:
            - ~/venv
          key: cache-key-cpu-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_repo_cpu

      - run:
          name: Run Linter (isort)
          command: |
            isort --check-only

      - run:
          name: Run Linter (black)
          command: |
            black --check .

      - run:
          name: Run type-checking (mypy)
          command: |
            mypy --pretty .

      - <<: *run_flake8

      - <<: *run_unittests

      - store_test_results:
          path: test-results

  gpu_tests_15:
    <<: *gpu

    working_directory: ~/fairscale

    steps:
      - checkout

      - run: nvidia-smi

      - run: pyenv global 3.7.0

      - <<: *setup_venv

      # Cache the venv directory that contains dependencies
      - restore_cache:
          keys:
            - cache-key-gpu15-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_dep_15

      - save_cache:
          paths:
            - ~/venv
          key: cache-key-gpu15-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_repo_gpu

      - <<: *run_unittests

      - store_test_results:
          path: test-results

  gpu_tests_16:
    <<: *gpu

    working_directory: ~/fairscale

    steps:
      - checkout

      - run: nvidia-smi

      - run: pyenv global 3.7.0

      - <<: *setup_venv

      # Cache the venv directory that contains dependencies
      - restore_cache:
          keys:
            - cache-key-gpu16-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_dep_16

      - save_cache:
          paths:
            - ~/venv
          key: cache-key-gpu16-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_repo_gpu

      - <<: *run_unittests

      - <<: *run_coverage

      - store_test_results:
          path: test-results

  benchmarks:
    <<: *gpu

    working_directory: ~/fairscale

    steps:
      - checkout

      - run: nvidia-smi

      - run: pyenv global 3.7.0

      - <<: *setup_venv

      # Cache the venv directory that contains dependencies
      - restore_cache:
          keys:
            - cache-key-gpu16-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_dep_16

      - save_cache:
          paths:
            - ~/venv
          key: cache-key-gpu16-{{ checksum "setup.py"}}-{{ checksum "requirements-test.txt"}}

      - <<: *install_repo_gpu

      - <<: *run_pipe_benchmark

      - <<: *run_oss_benchmark

      - <<: *run_oss_ddp_benchmark

workflows:
  version: 2
  build:
    jobs:
      - cpu_tests
      - gpu_tests_15
      - gpu_tests_16
      - benchmarks