Unverified Commit ffe735ba authored by Nikita Shulga's avatar Nikita Shulga Committed by GitHub
Browse files

Add CUDA binary builds (#1497)

* Add CUDA binary builds

* Add "cuda_version": "cpu" to doc build jobs

* Add required cu_versions to pkg_helpers

* Regenerate config.yml
parent b78bdde0
This diff is collapsed.
...@@ -71,11 +71,23 @@ binary_common: &binary_common ...@@ -71,11 +71,23 @@ binary_common: &binary_common
python_version: python_version:
description: "Python version to build against (e.g., 3.8)" description: "Python version to build against (e.g., 3.8)"
type: string type: string
cuda_version:
description: "CUDA version to build against (e.g., cpu, cu101)"
type: string
default: "cpu"
wheel_docker_image:
description: "Wheel only: what docker image to use"
type: string
default: "pytorch/manylinux-cuda102"
conda_docker_image:
description: "Conda only: what docker image to use"
type: string
default: "pytorch/conda-builder:cuda102"
environment: &environment environment: &environment
PYTHON_VERSION: << parameters.python_version >> PYTHON_VERSION: << parameters.python_version >>
BUILD_VERSION: << parameters.build_version >> BUILD_VERSION: << parameters.build_version >>
PYTORCH_VERSION: << parameters.pytorch_version >> PYTORCH_VERSION: << parameters.pytorch_version >>
CU_VERSION: cpu CU_VERSION: << parameters.cuda_version >>
smoke_test_common: &smoke_test_common smoke_test_common: &smoke_test_common
<<: *binary_common <<: *binary_common
...@@ -127,7 +139,7 @@ jobs: ...@@ -127,7 +139,7 @@ jobs:
binary_linux_wheel: binary_linux_wheel:
<<: *binary_common <<: *binary_common
docker: docker:
- image: "pytorch/manylinux-cuda102" - image: << parameters.wheel_docker_image >>
resource_class: 2xlarge+ resource_class: 2xlarge+
steps: steps:
- checkout - checkout
...@@ -144,7 +156,7 @@ jobs: ...@@ -144,7 +156,7 @@ jobs:
binary_linux_conda: binary_linux_conda:
<<: *binary_common <<: *binary_common
docker: docker:
- image: "pytorch/conda-cuda" - image: "<< parameters.conda_docker_image >>"
resource_class: 2xlarge+ resource_class: 2xlarge+
steps: steps:
- checkout - checkout
......
...@@ -21,6 +21,10 @@ import os.path ...@@ -21,6 +21,10 @@ import os.path
PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
CU_VERSIONS_DICT = {"linux": ["cpu", "cu102", "cu111"],
"windows": ["cpu", "cu102", "cu111"],
"macos": ["cpu"]}
DOC_VERSION = ('linux', '3.8') DOC_VERSION = ('linux', '3.8')
...@@ -31,7 +35,8 @@ def build_workflows(prefix='', upload=False, filter_branch=None, indentation=6): ...@@ -31,7 +35,8 @@ def build_workflows(prefix='', upload=False, filter_branch=None, indentation=6):
for btype in ["wheel", "conda"]: for btype in ["wheel", "conda"]:
for os_type in ["linux", "macos", "windows"]: for os_type in ["linux", "macos", "windows"]:
for python_version in PYTHON_VERSIONS: for python_version in PYTHON_VERSIONS:
w += build_workflow_pair(btype, os_type, python_version, filter_branch, prefix, upload) for cu_version in CU_VERSIONS_DICT[os_type]:
w += build_workflow_pair(btype, os_type, python_version, cu_version, filter_branch, prefix, upload)
if not filter_branch: if not filter_branch:
# Build on every pull request, but upload only on nightly and tags # Build on every pull request, but upload only on nightly and tags
...@@ -53,27 +58,19 @@ def build_download_job(filter_branch): ...@@ -53,27 +58,19 @@ def build_download_job(filter_branch):
return [{"download_third_parties_nix": job}] return [{"download_third_parties_nix": job}]
def build_workflow_pair(btype, os_type, python_version, filter_branch, prefix='', upload=False): def build_workflow_pair(btype, os_type, python_version, cu_version, filter_branch, prefix='', upload=False):
w = [] w = []
base_workflow_name = "{prefix}binary_{os_type}_{btype}_py{python_version}".format( base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}_{cu_version}"
prefix=prefix, w.append(generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype))
os_type=os_type,
btype=btype,
python_version=python_version,
)
w.append(generate_base_workflow(base_workflow_name, python_version, filter_branch, os_type, btype))
if upload: if upload:
is_py3_linux = os_type in ['linux', "windows"] and not python_version.startswith("2.")
w.append(generate_upload_workflow(base_workflow_name, filter_branch, btype)) w.append(generate_upload_workflow(base_workflow_name, filter_branch, btype))
if filter_branch == 'nightly' and is_py3_linux: if filter_branch == 'nightly' and os_type != 'macos':
pydistro = 'pip' if btype == 'wheel' else 'conda' pydistro = 'pip' if btype == 'wheel' else 'conda'
w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type)) w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type))
return w return w
...@@ -82,7 +79,7 @@ def build_doc_job(filter_branch): ...@@ -82,7 +79,7 @@ def build_doc_job(filter_branch):
job = { job = {
"name": "build_docs", "name": "build_docs",
"python_version": "3.8", "python_version": "3.8",
"requires": ["binary_linux_wheel_py3.8", ], "requires": ["binary_linux_wheel_py3.8_cpu", ],
} }
if filter_branch: if filter_branch:
...@@ -107,7 +104,7 @@ def docstring_parameters_sync_job(filter_branch): ...@@ -107,7 +104,7 @@ def docstring_parameters_sync_job(filter_branch):
job = { job = {
"name": "docstring_parameters_sync", "name": "docstring_parameters_sync",
"python_version": "3.8", "python_version": "3.8",
"requires": ["binary_linux_wheel_py3.8", ], "requires": ["binary_linux_wheel_py3.8_cpu", ],
} }
if filter_branch: if filter_branch:
...@@ -115,15 +112,20 @@ def docstring_parameters_sync_job(filter_branch): ...@@ -115,15 +112,20 @@ def docstring_parameters_sync_job(filter_branch):
return [{"docstring_parameters_sync": job}] return [{"docstring_parameters_sync": job}]
def generate_base_workflow(base_workflow_name, python_version, filter_branch, os_type, btype): def generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype):
d = { d = {
"name": base_workflow_name, "name": base_workflow_name,
"python_version": python_version, "python_version": python_version,
"cuda_version": cu_version,
} }
if os_type in ['linux', 'macos']: if os_type in ['linux', 'macos']:
d['requires'] = ['download_third_parties_nix'] d['requires'] = ['download_third_parties_nix']
if btype == 'conda':
d['conda_docker_image'] = f'pytorch/conda-builder:{cu_version.replace("cu1","cuda1")}'
elif cu_version != 'cpu':
d['wheel_docker_image'] = f'pytorch/manylinux-{cu_version.replace("cu1","cuda1")}'
if filter_branch: if filter_branch:
d["filters"] = gen_filter_branch_tree(filter_branch) d["filters"] = gen_filter_branch_tree(filter_branch)
...@@ -157,23 +159,23 @@ def generate_upload_workflow(base_workflow_name, filter_branch, btype): ...@@ -157,23 +159,23 @@ def generate_upload_workflow(base_workflow_name, filter_branch, btype):
return {"binary_{btype}_upload".format(btype=btype): d} return {"binary_{btype}_upload".format(btype=btype): d}
def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type): def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type):
required_build_suffix = "_upload" required_build_suffix = "_upload"
required_build_name = base_workflow_name + required_build_suffix required_build_name = base_workflow_name + required_build_suffix
smoke_suffix = "smoke_test_{pydistro}".format(pydistro=pydistro) smoke_suffix = f"smoke_test_{pydistro}".format(pydistro=pydistro)
d = { d = {
"name": "{base_workflow_name}_{smoke_suffix}".format( "name": f"{base_workflow_name}_{smoke_suffix}",
base_workflow_name=base_workflow_name, smoke_suffix=smoke_suffix),
"requires": [required_build_name], "requires": [required_build_name],
"python_version": python_version, "python_version": python_version,
"cuda_version": cu_version,
} }
if filter_branch: if filter_branch:
d["filters"] = gen_filter_branch_tree(filter_branch) d["filters"] = gen_filter_branch_tree(filter_branch)
return {"smoke_test_{os_type}_{pydistro}".format(os_type=os_type, pydistro=pydistro): d} return {f"smoke_test_{os_type}_{pydistro}": d}
def indent(indentation, data_list): def indent(indentation, data_list):
...@@ -192,6 +194,7 @@ def unittest_workflows(indentation=6): ...@@ -192,6 +194,7 @@ def unittest_workflows(indentation=6):
job = { job = {
"name": f"unittest_{os_type}_{device_type}_py{python_version}", "name": f"unittest_{os_type}_{device_type}_py{python_version}",
"python_version": python_version, "python_version": python_version,
"cuda_version": 'cpu' if device_type=="cpu" else "cu102",
} }
if os_type != "windows": if os_type != "windows":
...@@ -204,6 +207,7 @@ def unittest_workflows(indentation=6): ...@@ -204,6 +207,7 @@ def unittest_workflows(indentation=6):
"stylecheck": { "stylecheck": {
"name": f"stylecheck_py{python_version}", "name": f"stylecheck_py{python_version}",
"python_version": python_version, "python_version": python_version,
"cuda_version": 'cpu' if device_type=="cpu" else "cu102",
} }
}) })
return indent(indentation, jobs) return indent(indentation, jobs)
......
...@@ -52,17 +52,60 @@ setup_cuda() { ...@@ -52,17 +52,60 @@ setup_cuda() {
# Now work out the CUDA settings # Now work out the CUDA settings
case "$CU_VERSION" in case "$CU_VERSION" in
cu112)
if [[ "$OSTYPE" == "msys" ]]; then
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.2"
else
export CUDA_HOME=/usr/local/cuda-11.2/
fi
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
;;
cu111)
if [[ "$OSTYPE" == "msys" ]]; then
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1"
else
export CUDA_HOME=/usr/local/cuda-11.1/
fi
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
;;
cu110)
if [[ "$OSTYPE" == "msys" ]]; then
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0"
else
export CUDA_HOME=/usr/local/cuda-11.0/
fi
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0"
;;
cu102)
if [[ "$OSTYPE" == "msys" ]]; then
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2"
else
export CUDA_HOME=/usr/local/cuda-10.2/
fi
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5"
;;
cu101)
if [[ "$OSTYPE" == "msys" ]]; then
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.1"
else
export CUDA_HOME=/usr/local/cuda-10.1/
fi
export FORCE_CUDA=1
export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5"
;;
cu100) cu100)
export CUDA_HOME=/usr/local/cuda-10.0/ export CUDA_HOME=/usr/local/cuda-10.0/
export FORCE_CUDA=1 export FORCE_CUDA=1
# Hard-coding gencode flags is temporary situation until export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5"
# https://github.com/pytorch/pytorch/pull/23408 lands
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
;; ;;
cu92) cu92)
export CUDA_HOME=/usr/local/cuda-9.2/ export CUDA_HOME=/usr/local/cuda-9.2/
export FORCE_CUDA=1 export FORCE_CUDA=1
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_50,code=compute_50" export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0"
;; ;;
cpu) cpu)
;; ;;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment