Unverified commit 155de2b4 authored by Edward Z. Yang, committed by GitHub

Packaging fixes (#1214)

Add uploading support, make CUDA builds actually work.
parent 64ccfe34
@@ -2,8 +2,9 @@ version: 2.1
# How to test the Linux jobs:
# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/
# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel
# - Replace binary_linux_wheel with the name of the job you want to test
# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7
# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test.
# Job names are given by the 'name:' key.
binary_common: &binary_common
parameters:
@@ -20,19 +21,23 @@ binary_common: &binary_common
python_version:
description: "Python version to build against (e.g., 3.7)"
type: string
cuda_version:
description: "CUDA version to build against (e.g., cpu or 10.0)"
cu_version:
description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
type: string
unicode_abi:
description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)"
type: string
default: ""
wheel_docker_image:
description: "Wheel only: what docker image to use"
type: string
default: "soumith/manylinux-cuda100"
environment:
PYTHON_VERSION: << parameters.python_version >>
BUILD_VERSION: << parameters.build_version >>
PYTORCH_VERSION: << parameters.pytorch_version >>
UNICODE_ABI: << parameters.unicode_abi >>
CUDA_VERSION: << parameters.cuda_version >>
CU_VERSION: << parameters.cu_version >>
jobs:
circleci_consistency:
@@ -49,13 +54,17 @@ jobs:
binary_linux_wheel:
<<: *binary_common
docker:
- image: "soumith/manylinux-cuda100"
- image: << parameters.wheel_docker_image >>
resource_class: 2xlarge+
steps:
- checkout
- run: packaging/build_wheel.sh
- store_artifacts:
path: dist
- persist_to_workspace:
root: dist
paths:
- "*"
binary_linux_conda:
<<: *binary_common
@@ -67,6 +76,10 @@ jobs:
- run: packaging/build_conda.sh
- store_artifacts:
path: /opt/conda/conda-bld/linux-64
- persist_to_workspace:
root: /opt/conda/conda-bld/linux-64
paths:
- "*"
binary_macos_wheel:
<<: *binary_common
@@ -85,6 +98,10 @@ jobs:
packaging/build_wheel.sh
- store_artifacts:
path: dist
- persist_to_workspace:
root: dist
paths:
- "*"
binary_macos_conda:
<<: *binary_common
@@ -101,30 +118,101 @@ jobs:
packaging/build_conda.sh
- store_artifacts:
path: /Users/distiller/miniconda3/conda-bld/osx-64
- persist_to_workspace:
root: /Users/distiller/miniconda3/conda-bld/osx-64
paths:
- "*"
workflows:
build:
jobs:
- circleci_consistency
{%- for btype in ["wheel", "conda"] -%}
{%- for os in ["linux", "macos"] -%}
{%- for python_version in ["2.7", "3.5", "3.6", "3.7"] -%}
{%- for cuda_version in ["cpu", "10.0", "9.2"] -%}
{%- if os != "macos" or cuda_version == "cpu" %}
- binary_{{os}}_{{btype}}:
# TODO: cudacpu is ugly
name: binary_{{os}}_{{btype}}_py{{python_version}}_cuda{{cuda_version}}
python_version: "{{python_version}}"
cuda_version: "{{cuda_version}}"
{%- if btype == "wheel" and python_version == "2.7" %}
# Requires org-member context
binary_conda_upload:
docker:
- image: continuumio/miniconda
steps:
- attach_workspace:
at: ~/workspace
- run:
command: |
# Prevent credentials from leaking
conda install -yq anaconda-client
set +x
anaconda login \
--username "$PYTORCH_BINARY_PJH5_CONDA_USERNAME" \
--password "$PYTORCH_BINARY_PJH5_CONDA_PASSWORD"
set -x
anaconda upload ~/workspace/*.tar.bz2 -u pytorch-nightly --label main --no-progress --force
# Requires org-member context
binary_wheel_upload:
docker:
- image: circleci/python:3.7
steps:
- attach_workspace:
at: ~/workspace
- checkout
- run:
command: |
pip install --user awscli
export PATH="$HOME/.local/bin:$PATH"
# Prevent credentials from leaking
set +x
export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
set -x
#for pkg in ~/workspace/*.whl; do
# aws s3 cp "$pkg" "s3://pytorch/whl/nightly/" --acl public-read
#done
{%- macro workflow(btype, os, python_version, cu_version, unicode, prefix='', upload=False) %}
- binary_{{os}}_{{btype}}:
name: binary_{{os}}_{{btype}}_py{{python_version}}_cuda{{cuda_version}}_unicode
name: {{prefix}}binary_{{os}}_{{btype}}_py{{python_version}}{{ "u" if unicode }}_{{cu_version}}
python_version: "{{python_version}}"
cuda_version: "{{cuda_version}}"
cu_version: "{{cu_version}}"
{%- if unicode %}
unicode_abi: "1"
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- endif %}
{%- if cu_version == "cu92" %}
wheel_docker_image: "soumith/manylinux-cuda92"
{%- endif %}
{%- if upload %}
- binary_{{btype}}_upload:
name: {{prefix}}binary_{{os}}_{{btype}}_py{{python_version}}{{ "u" if unicode }}_{{cu_version}}_upload
context: org-member
requires:
- {{prefix}}binary_{{os}}_{{btype}}_py{{python_version}}{{ "u" if unicode }}_{{cu_version}}
{%- endif %}
{%- endmacro %}
{%- macro workflows(prefix='', upload=False) %}
{%- for btype in ["wheel", "conda"] -%}
{%- for os in ["linux", "macos"] -%}
{%- for python_version in ["2.7", "3.5", "3.6", "3.7"] -%}
{%- for cu_version in (["cpu", "cu92", "cu100"] if os == "linux" else ["cpu"]) -%}
{%- for unicode in ([False, True] if btype == "wheel" and python_version == "2.7" else [False]) -%}
{{ workflow(btype, os, python_version, cu_version, unicode, prefix=prefix, upload=upload) }}
{%- endfor -%}
{%- endfor -%}
{%- endfor -%}
{%- endfor -%}
{%- endfor -%}
{%- endfor %}
{%- endmacro %}
workflows:
build:
{%- if True %}
jobs:
- circleci_consistency
{{ workflows() }}
nightly:
triggers:
- schedule:
cron: "0 9 * * *"
filters:
branches:
only:
- master
{%- endif %}
jobs:
- circleci_consistency
{{ workflows(prefix="nightly_", upload=True) }}
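# Illustrative note, not generated output: with prefix="nightly_" and upload=True the
# macros above expand to build jobs named like
#   nightly_binary_linux_wheel_py3.7_cu100
#   nightly_binary_linux_wheel_py2.7u_cpu
# each followed by a matching upload job, e.g.
#   nightly_binary_linux_wheel_py3.7_cu100_upload
# which runs in the org-member context and requires its build job.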
@@ -4,9 +4,7 @@ set -ex
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "$script_dir/pkg_helpers.bash"
setup_build_version 0.4.0
setup_cuda_suffix
setup_macos
setup_env 0.4.0
export SOURCE_ROOT_DIR="$PWD"
setup_conda_pytorch_constraint
setup_conda_cudatoolkit_constraint
......
@@ -4,10 +4,8 @@ set -ex
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "$script_dir/pkg_helpers.bash"
setup_python
setup_cuda_suffix
setup_build_version 0.4.0
setup_macos
setup_env 0.4.0
setup_wheel_python
pip_install numpy pyyaml future ninja
setup_pip_pytorch_version
python setup.py clean
......
# A set of useful bash functions for common functionality we need to do in
# many build scripts
# Respecting PYTHON_VERSION and UNICODE_ABI, add (or install) the correct
# version of Python to perform a build. Relevant to wheel builds.
setup_python() {
if [[ "$(uname)" == Darwin ]]; then
eval "$(conda shell.bash hook)"
conda env remove -n "env$PYTHON_VERSION" || true
conda create -yn "env$PYTHON_VERSION" python="$PYTHON_VERSION"
conda activate "env$PYTHON_VERSION"
else
case "$PYTHON_VERSION" in
2.7)
if [[ -n "$UNICODE_ABI" ]]; then
python_abi=cp27-cp27mu
else
python_abi=cp27-cp27m
fi
;;
3.5) python_abi=cp35-cp35m ;;
3.6) python_abi=cp36-cp36m ;;
3.7) python_abi=cp37-cp37m ;;
*)
echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
exit 1
;;
esac
export PATH="/opt/python/$python_abi/bin:$PATH"
fi
}
# Fill CUDA_SUFFIX and CU_VERSION with CUDA_VERSION. CUDA_SUFFIX is
# left blank for the default CUDA version (that's a blank suffix)
setup_cuda_suffix() {
if [[ "$(uname)" == Darwin ]]; then
if [[ "$CUDA_VERSION" != "cpu" ]]; then
echo "CUDA_VERSION on OS X must be cpu"
# Setup CUDA environment variables, based on CU_VERSION
#
# Inputs:
# CU_VERSION (cpu, cu92, cu100)
# NO_CUDA_PACKAGE (bool)
#
# Outputs:
# VERSION_SUFFIX (e.g., "")
# PYTORCH_VERSION_SUFFIX (e.g., +cpu)
# WHEEL_DIR (e.g., cu100/)
# CUDA_HOME (e.g., /usr/local/cuda-9.2, respected by torch.utils.cpp_extension)
# FORCE_CUDA (respected by torchvision setup.py)
# NVCC_FLAGS (respected by torchvision setup.py)
#
# Precondition: CUDA versions are installed in their conventional locations in
# /usr/local/cuda-*
#
# NOTE: Why VERSION_SUFFIX versus PYTORCH_VERSION_SUFFIX? If you're building
# a package with CUDA on a platform we support CUDA on, VERSION_SUFFIX ==
# PYTORCH_VERSION_SUFFIX and everyone is happy. However, if you are building a
# package with only CPU bits (e.g., torchaudio), then VERSION_SUFFIX is always
# empty, but PYTORCH_VERSION_SUFFIX is +cpu (because that's how you get a CPU
# version of a Python package). But that doesn't apply if you're on OS X,
# since the default CU_VERSION on OS X is cpu.
setup_cuda() {
if [[ "$(uname)" == Darwin ]] || [[ -n "$NO_CUDA_PACKAGE" ]]; then
if [[ "$CU_VERSION" != "cpu" ]]; then
echo "CU_VERSION on OS X / package with no CUDA must be cpu"
exit 1
fi
export CPU_SUFFIX=""
export CU_VERSION="cpu"
if [[ "$(uname)" == Darwin ]]; then
export PYTORCH_VERSION_SUFFIX=""
else
export PYTORCH_VERSION_SUFFIX="+cpu"
fi
export VERSION_SUFFIX=""
# NB: When there is no CUDA package available, we put these
# packages in the top-level directory, so they are eligible
# for selection even if you are otherwise trying to install
# a cu100 stack. This differs from when there ARE CUDA packages
# available; then we don't want the cpu package; we want
# to give you as many goodies as possible.
export WHEEL_DIR=""
else
case "$CUDA_VERSION" in
10.0)
export CUDA_SUFFIX=""
export CU_VERSION="cu100"
case "$CU_VERSION" in
cu100)
export PYTORCH_VERSION_SUFFIX=""
export CUDA_HOME=/usr/local/cuda-10.0/
export FORCE_CUDA=1
# Hard-coding gencode flags is temporary situation until
# https://github.com/pytorch/pytorch/pull/23408 lands
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
;;
9.2)
export CUDA_SUFFIX="+cu92"
export CU_VERSION="cu92"
cu92)
export CUDA_HOME=/usr/local/cuda-9.2/
export PYTORCH_VERSION_SUFFIX="+cu92"
export FORCE_CUDA=1
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_50,code=compute_50"
;;
cpu)
export CUDA_SUFFIX="+cpu"
export CU_VERSION="cpu"
export PYTORCH_VERSION_SUFFIX="+cpu"
;;
*)
echo "Unrecognized CUDA_VERSION=$CUDA_VERSION"
echo "Unrecognized CU_VERSION=$CU_VERSION"
esac
export CPU_SUFFIX="+cpu"
export VERSION_SUFFIX="$PYTORCH_VERSION_SUFFIX"
export WHEEL_DIR="$CU_VERSION/"
fi
}
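# Illustrative sketch, not part of the original script: expected effect of setup_cuda
# for a CUDA 10.0 Linux build (assuming /usr/local/cuda-10.0 is installed):
#
#   CU_VERSION=cu100 setup_cuda
#   # => PYTORCH_VERSION_SUFFIX=""  VERSION_SUFFIX=""  WHEEL_DIR="cu100/"
#   #    CUDA_HOME="/usr/local/cuda-10.0/"  FORCE_CUDA=1
#   #    NVCC_FLAGS set to the hard-coded gencode list above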
# If a package is cpu-only, we never provide a cuda suffix
setup_cpuonly_cuda_suffix() {
export CUDA_SUFFIX=""
export CPU_SUFFIX=""
}
# Fill BUILD_VERSION and BUILD_NUMBER if it doesn't exist already with a nightly string
# Usage: setup_build_version 0.2
# Populate build version if necessary, and add version suffix
#
# Inputs:
# BUILD_VERSION (e.g., 0.2.0 or empty)
#
# Outputs:
# BUILD_VERSION (e.g., 0.2.0.dev20190807+cpu)
#
# Fill BUILD_VERSION if it doesn't exist already with a nightly string
# Usage: setup_build_version 0.2.0
setup_build_version() {
if [[ -z "$BUILD_VERSION" ]]; then
export BUILD_VERSION="$1.dev$(date "+%Y%m%d")"
export BUILD_VERSION="$1.dev$(date "+%Y%m%d")$VERSION_SUFFIX"
else
export BUILD_VERSION="$BUILD_VERSION$VERSION_SUFFIX"
fi
}
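# Illustrative sketch, not part of the original script: assuming setup_cuda already set
# VERSION_SUFFIX="+cpu" and today is 2019-08-07,
#
#   setup_build_version 0.4.0
#   # => BUILD_VERSION="0.4.0.dev20190807+cpu" (when BUILD_VERSION was initially unset)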
@@ -82,11 +97,55 @@ setup_macos() {
fi
}
# Top-level entry point for things every package will need to do
#
# Usage: setup_env 0.2.0
setup_env() {
setup_cuda
setup_build_version "$1"
setup_macos
}
# Function to retry functions that sometimes timeout or have flaky failures
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
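# Illustrative note, not part of the original script: retry makes up to five attempts,
# sleeping 1, 2, 4 and 8 seconds between them, e.g. (hypothetical invocation)
#   retry pip install --progress-bar off numpy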
# Inputs:
# PYTHON_VERSION (2.7, 3.5, 3.6, 3.7)
# UNICODE_ABI (bool)
#
# Outputs:
# PATH modified to put correct Python version in PATH
#
# Precondition: If Linux, you are in a soumith/manylinux-cuda* Docker image
setup_wheel_python() {
if [[ "$(uname)" == Darwin ]]; then
eval "$(conda shell.bash hook)"
conda env remove -n "env$PYTHON_VERSION" || true
conda create -yn "env$PYTHON_VERSION" python="$PYTHON_VERSION"
conda activate "env$PYTHON_VERSION"
else
case "$PYTHON_VERSION" in
2.7)
if [[ -n "$UNICODE_ABI" ]]; then
python_abi=cp27-cp27mu
else
python_abi=cp27-cp27m
fi
;;
3.5) python_abi=cp35-cp35m ;;
3.6) python_abi=cp36-cp36m ;;
3.7) python_abi=cp37-cp37m ;;
*)
echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
exit 1
;;
esac
export PATH="/opt/python/$python_abi/bin:$PATH"
fi
}
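# Illustrative sketch, not part of the original script: inside a soumith/manylinux-cuda*
# image,
#
#   PYTHON_VERSION=2.7 UNICODE_ABI=1 setup_wheel_python
#
# prepends /opt/python/cp27-cp27mu/bin to PATH, so python and pip resolve to the
# wide-unicode CPython 2.7 that ships with manylinux.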
# Install with pip a bit more robustly than the default
pip_install() {
retry pip install --progress-bar off "$@"
@@ -98,7 +157,7 @@ setup_pip_pytorch_version() {
if [[ -z "$PYTORCH_VERSION" ]]; then
# Install latest prerelease version of torch, per our nightlies, consistent
# with the requested cuda version
pip_install --pre torch -f "https://download.pytorch.org/whl/nightly/${CU_VERSION}/torch_nightly.html"
pip_install --pre torch -f "https://download.pytorch.org/whl/nightly/${WHEEL_DIR}torch_nightly.html"
if [[ "$CUDA_VERSION" == "cpu" ]]; then
# CUDA and CPU are ABI compatible on the CPU-only parts, so strip
# in this case
@@ -107,9 +166,9 @@ setup_pip_pytorch_version() {
export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//')"
fi
else
# TODO: Maybe add staging too
pip_install "torch==$PYTORCH_VERSION$CUDA_SUFFIX" \
-f https://download.pytorch.org/whl/torch_stable.html
-f https://download.pytorch.org/whl/torch_stable.html \
-f https://download.pytorch.org/whl/nightly/torch_nightly.html
fi
}
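# Illustrative sketch, not part of the original script: with PYTORCH_VERSION unset and
# WHEEL_DIR="cu100/", the nightly branch above boils down to
#
#   pip install --progress-bar off --pre torch \
#     -f https://download.pytorch.org/whl/nightly/cu100/torch_nightly.html
#
# after which PYTORCH_VERSION is read back from "pip show torch".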
@@ -117,21 +176,19 @@ setup_pip_pytorch_version() {
# CONDA_CHANNEL_FLAGS with appropriate flags to retrieve these versions
#
# You MUST have populated CUDA_SUFFIX beforehand.
#
# TODO: This is currently hard-coded for CPU-only case
setup_conda_pytorch_constraint() {
if [[ -z "$PYTORCH_VERSION" ]]; then
export CONDA_CHANNEL_FLAGS="-c pytorch-nightly"
export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | python -c "import sys, json, re; print(re.sub(r'\\+.*$', '', json.load(sys.stdin)['pytorch'][-1]['version']))")"
else
export CONDA_CHANNEL_FLAGS="-c pytorch"
export CONDA_CHANNEL_FLAGS="-c pytorch -c pytorch-nightly"
fi
if [[ "$CUDA_VERSION" == cpu ]]; then
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION${CPU_SUFFIX}"
if [[ "$CU_VERSION" == cpu ]]; then
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION${PYTORCH_VERSION_SUFFIX}"
export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
else
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${CUDA_SUFFIX}"
export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${CUDA_SUFFIX}"
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
fi
}
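# Illustrative sketch, not part of the original script: with a hypothetical
# PYTORCH_VERSION=1.2.0 already set and PYTORCH_VERSION_SUFFIX="+cu92", the function
# above produces
#
#   CONDA_CHANNEL_FLAGS="-c pytorch -c pytorch-nightly"
#   CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==1.2.0+cu92"
#   CONDA_PYTORCH_CONSTRAINT="- pytorch==1.2.0+cu92"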
@@ -141,17 +198,21 @@ setup_conda_cudatoolkit_constraint() {
if [[ "$(uname)" == Darwin ]]; then
export CONDA_CUDATOOLKIT_CONSTRAINT=""
else
case "$CUDA_VERSION" in
10.0)
case "$CU_VERSION" in
cu100)
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]"
;;
9.2)
cu92)
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
;;
cpu)
export CONDA_CUDATOOLKIT_CONSTRAINT=""
export CONDA_CPUONLY_FEATURE="- cpuonly"
;;
*)
echo "Unrecognized CU_VERSION=$CU_VERSION"
exit 1
;;
esac
fi
}
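# Illustrative sketch, not part of the original script: on Linux with CU_VERSION=cu92
# the function above sets
#
#   CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
#
# while CU_VERSION=cpu clears the constraint and sets CONDA_CPUONLY_FEATURE="- cpuonly".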