Unverified commit 867d669b authored by moto, committed by GitHub

Add CUDA test on CCI (#586)

* Add GPU test

* Run only on master
parent b71b5ee5
@@ -221,7 +221,7 @@ jobs:
             docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID}
             docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID}
 
-  unittest_linux:
+  unittest_linux_cpu:
     <<: *binary_common
     docker:
       - image: "pytorch/torchaudio_unittest_base:manylinux"
@@ -259,7 +259,47 @@ jobs:
       - store_test_results:
           path: test-results
 
-  unittest_windows:
+  unittest_linux_gpu:
+    <<: *binary_common
+    machine:
+      image: ubuntu-1604-cuda-10.1:201909-23
+    resource_class: gpu.small
+    environment:
+      image_name: "pytorch/torchaudio_unittest_base:manylinux-cuda10.1"
+    steps:
+      - checkout
+      - run:
+          name: Generate cache key
+          # This will refresh cache on Sundays, nightly build should generate new cache.
+          command: echo "$(date +"%Y-%U")" > .circleci-weekly
+      - restore_cache:
+          keys:
+            - env-v1-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }}
+      - run:
+          name: Setup
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh
+      - save_cache:
+          key: env-v1-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }}
+          paths:
+            - conda
+            - env
+      - run:
+          name: Install torchaudio
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/install.sh
+      - run:
+          name: Run tests
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
+      - run:
+          name: Post Process
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh
+      - store_test_results:
+          path: test-results
+
+  unittest_windows_cpu:
     <<: *binary_common
     executor:
       name: windows-cpu
@@ -338,23 +378,41 @@ workflows:
           python_version: '3.8'
   unittest:
     jobs:
-      - unittest_linux:
-          name: unittest_linux_py3.6
+      - unittest_linux_cpu:
+          name: unittest_linux_cpu_py3.6
+          python_version: '3.6'
+      - unittest_linux_cpu:
+          name: unittest_linux_cpu_py3.7
+          python_version: '3.7'
+      - unittest_linux_cpu:
+          name: unittest_linux_cpu_py3.8
+          python_version: '3.8'
+      - unittest_linux_gpu:
+          filters:
+            branches:
+              only: master
+          name: unittest_linux_gpu_py3.6
           python_version: '3.6'
-      - unittest_linux:
-          name: unittest_linux_py3.7
+      - unittest_linux_gpu:
+          filters:
+            branches:
+              only: master
+          name: unittest_linux_gpu_py3.7
           python_version: '3.7'
-      - unittest_linux:
-          name: unittest_linux_py3.8
+      - unittest_linux_gpu:
+          filters:
+            branches:
+              only: master
+          name: unittest_linux_gpu_py3.8
           python_version: '3.8'
-      - unittest_windows:
-          name: unittest_windows_py3.6
+      - unittest_windows_cpu:
+          name: unittest_windows_cpu_py3.6
           python_version: '3.6'
-      - unittest_windows:
-          name: unittest_windows_py3.7
+      - unittest_windows_cpu:
+          name: unittest_windows_cpu_py3.7
           python_version: '3.7'
-      - unittest_windows:
-          name: unittest_windows_py3.8
+      - unittest_windows_cpu:
+          name: unittest_windows_cpu_py3.8
           python_version: '3.8'
   nightly:
     jobs:
...
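In the GPU job above, the "Generate cache key" step writes the current year and week number to .circleci-weekly, and that file's checksum is folded into the conda-environment cache key, so the cache rotates once a week (the inline comment notes it rolls over on Sundays) while nightly builds repopulate it. A minimal Python sketch of the same rotation idea, for illustration only: the job itself uses date +"%Y-%U" in shell, and the key layout below is a simplified stand-in, not the exact key.

from datetime import date

# %Y-%U mirrors date +"%Y-%U": year plus week-of-year with Sunday as the first
# day of the week, so the stamp (and hence the cache key) changes every Sunday.
weekly_stamp = date.today().strftime("%Y-%U")

# Hypothetical key shaped like the env-v1-linux-... key used in the config above.
cache_key = f"env-v1-linux-x86_64-py3.8-<environment.yml checksum>-{weekly_stamp}"
print(cache_key)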
@@ -221,7 +221,7 @@ jobs:
             docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID}
             docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/${image_name}:${CIRCLE_WORKFLOW_ID}
 
-  unittest_linux:
+  unittest_linux_cpu:
     <<: *binary_common
     docker:
       - image: "pytorch/torchaudio_unittest_base:manylinux"
@@ -259,7 +259,47 @@ jobs:
       - store_test_results:
           path: test-results
 
-  unittest_windows:
+  unittest_linux_gpu:
+    <<: *binary_common
+    machine:
+      image: ubuntu-1604-cuda-10.1:201909-23
+    resource_class: gpu.small
+    environment:
+      image_name: "pytorch/torchaudio_unittest_base:manylinux-cuda10.1"
+    steps:
+      - checkout
+      - run:
+          name: Generate cache key
+          # This will refresh cache on Sundays, nightly build should generate new cache.
+          command: echo "$(date +"%Y-%U")" > .circleci-weekly
+      - restore_cache:
+          {% raw %}
+          keys:
+            - env-v1-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }}
+          {% endraw %}
+      - run:
+          name: Setup
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh
+      - save_cache:
+          {% raw %}
+          key: env-v1-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }}
+          {% endraw %}
+          paths:
+            - conda
+            - env
+      - run:
+          name: Install torchaudio
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/install.sh
+      - run:
+          name: Run tests
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
+      - run:
+          name: Post Process
+          command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh
+      - store_test_results:
+          path: test-results
+
+  unittest_windows_cpu:
     <<: *binary_common
     executor:
       name: windows-cpu
...
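This second, near-identical hunk is in the template from which regenerate.py generates the final config, which is why the cache-key lines here are wrapped in {% raw %} / {% endraw %}: Jinja tags that stop the template engine from interpreting CircleCI's own {{ arch }} and {{ checksum ... }} expressions during rendering. A small illustration of the failure mode, assuming a plain Jinja2 Template rather than the repository's actual rendering code:

from jinja2 import Template

line = '- env-v1-linux-{{ arch }}-py3.8'

# Unprotected, Jinja2 treats {{ arch }} as one of its own variables; with the
# default undefined behaviour it silently renders as an empty string.
print(Template(line).render())  # - env-v1-linux--py3.8

# Wrapped in {% raw %} ... {% endraw %}, the CircleCI syntax passes through untouched.
print(Template('{% raw %}' + line + '{% endraw %}').render())  # - env-v1-linux-{{ arch }}-py3.8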
@@ -111,16 +111,21 @@ def indent(indentation, data_list):
 def unittest_workflows(indentation=6):
-    w = []
+    jobs = []
     for os_type in ["linux", "windows"]:
-        for python_version in PYTHON_VERSIONS:
-            w.append({
-                f"unittest_{os_type}": {
-                    "name": f"unittest_{os_type}_py{python_version}",
+        for device_type in ["cpu", "gpu"]:
+            if os_type == 'windows' and device_type == 'gpu':
+                continue
+            for python_version in PYTHON_VERSIONS:
+                job = {
+                    "name": f"unittest_{os_type}_{device_type}_py{python_version}",
                     "python_version": python_version,
                 }
-            })
-    return indent(indentation, w)
+
+                if device_type == 'gpu':
+                    job['filters'] = gen_filter_branch_tree('master')
+                jobs.append({f"unittest_{os_type}_{device_type}": job})
+    return indent(indentation, jobs)
 
 
 if __name__ == "__main__":
...
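For reference, a standalone sketch of the job matrix the reworked loop above produces; PYTHON_VERSIONS and the stub gen_filter_branch_tree below are assumptions for illustration, not the repository's definitions:

PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]  # assumed to match the workflow hunk above

def gen_filter_branch_tree(branch):
    # Stub shaped like the filters: block seen in the generated config.
    return {"branches": {"only": branch}}

jobs = []
for os_type in ["linux", "windows"]:
    for device_type in ["cpu", "gpu"]:
        if os_type == "windows" and device_type == "gpu":
            continue  # no Windows GPU job is defined
        for python_version in PYTHON_VERSIONS:
            job = {"name": f"unittest_{os_type}_{device_type}_py{python_version}",
                   "python_version": python_version}
            if device_type == "gpu":
                job["filters"] = gen_filter_branch_tree("master")
            jobs.append({f"unittest_{os_type}_{device_type}": job})

print(len(jobs))  # 9: three linux_cpu, three linux_gpu (master-only), three windows_cpu

These dictionaries are what ends up serialized under the unittest: workflow shown in the config hunk above.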
 scripts/build_third_parties.sh
+Dockerfile.tmp
@@ -45,7 +45,7 @@ RUN bash /scripts/build_third_parties.sh /
 ################################################################################
 # Build the final image
 ################################################################################
-FROM ubuntu:18.04
+FROM BASE_IMAGE
 RUN apt update && apt install -y \
     g++ \
     gfortran \
...
@@ -2,12 +2,29 @@
 set -euo pipefail
 
+if [ $# -ne 1 ]; then
+    printf "Usage %s <CUDA_VERSION>\n\n" "$0"
+    exit 1
+fi
+
+if [ "$1" = "cpu" ]; then
+    base_image="ubuntu:18.04"
+    image="pytorch/torchaudio_unittest_base:manylinux"
+elif [[ "$1" =~ ^(9.2|10.1)$ ]]; then
+    base_image="nvidia/cuda:$1-runtime-ubuntu18.04"
+    image="pytorch/torchaudio_unittest_base:manylinux-cuda$1"
+else
+    printf "Unexpected <CUDA_VERSION> string: %s" "$1"
+    exit 1;
+fi
+
 cd "$( dirname "${BASH_SOURCE[0]}" )"
 root_dir="$(git rev-parse --show-toplevel)"
 
 cp "${root_dir}"/packaging/build_from_source.sh ./scripts/build_third_parties.sh
 
-tag="manylinux"
-image="pytorch/torchaudio_unittest_base:${tag}"
-docker build -t "${image}" .
+# docker build also accepts reading from STDIN
+# but in that case, no context (other files) can be passed, so we write out Dockerfile
+sed "s|BASE_IMAGE|${base_image}|g" Dockerfile > Dockerfile.tmp
+docker build -t "${image}" -f Dockerfile.tmp .
 docker push "${image}"
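The image-build script now takes a single <CUDA_VERSION> argument (cpu, 9.2, or 10.1), picks the matching base image and image tag, and substitutes the BASE_IMAGE placeholder in the Dockerfile with sed, writing Dockerfile.tmp instead of piping so the build context stays available (as the in-script comment explains). A rough Python equivalent of that selection and substitution logic, purely illustrative and assuming it runs from the docker build directory:

import re

def resolve_images(cuda_version: str):
    # Map the script's <CUDA_VERSION> argument to (base_image, target image tag).
    if cuda_version == "cpu":
        return "ubuntu:18.04", "pytorch/torchaudio_unittest_base:manylinux"
    if re.fullmatch(r"9\.2|10\.1", cuda_version):
        return (f"nvidia/cuda:{cuda_version}-runtime-ubuntu18.04",
                f"pytorch/torchaudio_unittest_base:manylinux-cuda{cuda_version}")
    raise ValueError(f"Unexpected <CUDA_VERSION> string: {cuda_version}")

base_image, image = resolve_images("10.1")

# Equivalent of: sed "s|BASE_IMAGE|${base_image}|g" Dockerfile > Dockerfile.tmp
with open("Dockerfile") as src, open("Dockerfile.tmp", "w") as dst:
    dst.write(src.read().replace("BASE_IMAGE", base_image))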
@@ -10,8 +10,14 @@ set -e
 eval "$(./conda/bin/conda shell.bash hook)"
 conda activate ./env
 
-printf "* Installing PyTorch nightly build"
-conda install -y -c pytorch-nightly pytorch cpuonly
+if [ -z "${CUDA_VERSION:-}" ] ; then
+    cudatoolkit="cpuonly"
+else
+    version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")"
+    cudatoolkit="cudatoolkit=${version}"
+fi
+printf "Installing PyTorch with %s\n" "${cudatoolkit}"
+conda install -y -c pytorch-nightly pytorch "${cudatoolkit}"
 
 printf "* Installing torchaudio\n"
 # Link codecs present at /third_party. See Dockerfile for how this is built
...
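In the install script, the inline python -c call trims CUDA_VERSION to its major.minor component so it matches conda's cudatoolkit=<major.minor> package naming; when CUDA_VERSION is unset (the CPU image), the script falls back to cpuonly. What the one-liner computes, with an illustrative value (the real value comes from the environment; the nvidia/cuda base images typically export something like 10.1.243):

cuda_version = "10.1.243"  # illustrative; the script reads $CUDA_VERSION instead
major_minor = ".".join(cuda_version.split(".")[:2])
print(f"cudatoolkit={major_minor}")  # -> cudatoolkit=10.1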