Commit cc26cd81 authored by panning's avatar panning
Browse files

merge v0.16.0

parents f78f29f5 fbb4cc54
#!/usr/bin/env bash
# Commit built Sphinx docs into the gh-pages branch of the checkout at <src>,
# under a subdirectory named after <target branch> (e.g. "main" or "1.7").
set -ex
if [ "$2" == "" ]; then
    echo call as "$0" "<src>" "<target branch>"
    echo where src is the root of the built documentation git checkout and
    echo branch should be "main" or "1.7" or so
    exit 1
fi
src=$1
target=$2
echo "committing docs from ${src} to ${target}"
pushd "${src}"
git checkout gh-pages
mkdir -p ./"${target}"
# ":?" aborts instead of expanding to "./*" if target is somehow empty.
rm -rf ./"${target:?}"/*
cp -r "${src}/docs/build/html/"* ./"$target"
if [ "${target}" == "main" ]; then
    # "main" also refreshes the shared static assets at the repo root.
    mkdir -p ./_static
    rm -rf ./_static/*
    cp -r "${src}/docs/build/html/_static/"* ./_static
    git add --all ./_static || true
fi
git add --all ./"${target}" || true
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "auto-generating sphinx docs" || true
# The remote may already exist on re-runs; "git remote add" would otherwise
# fail and abort the script under "set -e".
git remote add https https://github.com/pytorch/vision.git || true
git push -u https gh-pages
This diff is collapsed.
This diff is collapsed.
#!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g. the nested loops for defining the configuration matrix)
should be at the top of the file for easy updating.
See this comment for design rationale:
https://github.com/pytorch/vision/pull/1321#issuecomment-531033978
"""
import os.path
import jinja2
import yaml
from jinja2 import select_autoescape
# Python versions for which binary builds and unit tests are generated.
PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]
# CircleCI tag-filter regex matching release-candidate tags like "v0.13.0-rc1".
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
def build_workflows(prefix="", filter_branch=None, upload=False, indentation=6, windows_latest_only=False):
    """Generate every binary build (and optional upload) workflow as indented YAML.

    Args:
        prefix: string prepended to every workflow name (e.g. "nightly_").
        filter_branch: branch filter forwarded to each job; None means no
            explicit filter (with the per-job exceptions below).
        upload: whether to also emit the matching upload jobs.
        indentation: number of spaces for the rendered YAML continuation lines.
        windows_latest_only: when True and filter_branch is None, restrict
            Windows builds other than (newest Python) x (first/last CUDA
            version) to the "main" branch.
    """
    w = []
    for btype in ["wheel", "conda"]:
        for os_type in ["linux", "macos", "win"]:
            python_versions = PYTHON_VERSIONS
            # Compute platforms built per OS; "cpu" plus CUDA/ROCm variants.
            cu_versions_dict = {
                "linux": ["cpu", "cu116", "cu117", "rocm5.1.1", "rocm5.2"],
                "win": ["cpu", "cu116", "cu117"],
                "macos": ["cpu"],
            }
            cu_versions = cu_versions_dict[os_type]
            for python_version in python_versions:
                for cu_version in cu_versions:
                    # ROCm conda packages not yet supported
                    if cu_version.startswith("rocm") and btype == "conda":
                        continue
                    for unicode in [False]:
                        fb = filter_branch
                        if (
                            windows_latest_only
                            and os_type == "win"
                            and filter_branch is None
                            and (
                                python_version != python_versions[-1]
                                or (cu_version not in [cu_versions[0], cu_versions[-1]])
                            )
                        ):
                            fb = "main"
                        if not fb and (
                            os_type == "linux" and cu_version == "cpu" and btype == "wheel" and python_version == "3.7"
                        ):
                            # the fields must match the build_docs "requires" dependency
                            fb = "/.*/"
                        w += workflow_pair(
                            btype, os_type, python_version, cu_version, unicode, prefix, upload, filter_branch=fb
                        )
    if not filter_branch:
        # Build on every pull request, but upload only on nightly and tags
        w += build_doc_job("/.*/")
        w += upload_doc_job("nightly")
    return indent(indentation, w)
def workflow_pair(btype, os_type, python_version, cu_version, unicode, prefix="", upload=False, *, filter_branch=None):
    """Return the build workflow for one configuration, plus its upload
    workflow when ``upload`` is set."""
    unicode_tag = "u" if unicode else ""
    base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}{unicode_tag}_{cu_version}"
    pair = [
        generate_base_workflow(
            base_workflow_name, python_version, cu_version, unicode, os_type, btype, filter_branch=filter_branch
        )
    ]
    if upload:
        pair.append(
            generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, filter_branch=filter_branch)
        )
    # disable smoke tests, they are broken and needs to be fixed
    # if filter_branch == "nightly" and os_type in ["linux", "win"]:
    #     pydistro = "pip" if btype == "wheel" else "conda"
    #     pair.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type))
    return pair
def build_doc_job(filter_branch):
    """Return the single docs-build job entry, optionally branch-filtered."""
    spec = {
        "name": "build_docs",
        "python_version": "3.7",
        # Must match a workflow name emitted by build_workflows (see the
        # fb = "/.*/" special case there).
        "requires": ["binary_linux_wheel_py3.7_cpu"],
    }
    if filter_branch:
        spec["filters"] = gen_filter_branch_tree(filter_branch, tags_list=RC_PATTERN)
    return [{"build_docs": spec}]
def upload_doc_job(filter_branch):
    """Return the docs-upload job entry; it always depends on build_docs."""
    spec = {
        "name": "upload_docs",
        "context": "org-member",
        "python_version": "3.7",
        "requires": ["build_docs"],
    }
    if filter_branch:
        spec["filters"] = gen_filter_branch_tree(filter_branch, tags_list=RC_PATTERN)
    return [{"upload_docs": spec}]
# NOTE(review): this mapping appears unused within this file —
# get_manylinux_image() constructs image names directly from the cu_version
# string; confirm no external user before removing.
manylinux_images = {
    "cu116": "pytorch/manylinux-cuda116",
    "cu117": "pytorch/manylinux-cuda117",
}
def get_manylinux_image(cu_version):
    """Map a compute-platform tag to its manylinux Docker image name.

    Args:
        cu_version: "cpu", "cuXYZ" (e.g. "cu116"), or "rocmX.Y[.Z]".

    Returns:
        The Docker image name, e.g. "pytorch/manylinux-cuda116".

    Raises:
        ValueError: for an unrecognized tag. (Previously the function fell
            through and returned None, which surfaced as a confusing null in
            the generated YAML.)
    """
    if cu_version == "cpu":
        return "pytorch/manylinux-cpu"
    if cu_version.startswith("cu"):
        return f"pytorch/manylinux-cuda{cu_version[len('cu'):]}"
    if cu_version.startswith("rocm"):
        return f"pytorch/manylinux-rocm:{cu_version[len('rocm'):]}"
    raise ValueError(f"Unknown compute platform: {cu_version!r}")
def get_conda_image(cu_version):
    """Map a compute-platform tag to its conda-builder Docker image name.

    Only "cpu" and "cuXYZ" are supported; callers skip ROCm because ROCm
    conda packages are not built.

    Raises:
        ValueError: for an unrecognized tag (previously fell through and
            returned None silently).
    """
    if cu_version == "cpu":
        return "pytorch/conda-builder:cpu"
    if cu_version.startswith("cu"):
        return f"pytorch/conda-builder:cuda{cu_version[len('cu'):]}"
    raise ValueError(f"Unknown compute platform: {cu_version!r}")
def generate_base_workflow(
    base_workflow_name, python_version, cu_version, unicode, os_type, btype, *, filter_branch=None
):
    """Build the {workflow_type: parameters} entry for one binary build."""
    params = {
        "name": base_workflow_name,
        "python_version": python_version,
        "cu_version": cu_version,
    }
    if os_type != "win":
        # Windows builds use neither the unicode ABI flag nor Docker images.
        if unicode:
            params["unicode_abi"] = "1"
        params["wheel_docker_image"] = get_manylinux_image(cu_version)
        # ROCm conda packages not yet supported
        if "rocm" not in cu_version:
            params["conda_docker_image"] = get_conda_image(cu_version)
    if filter_branch is not None:
        params["filters"] = {
            "branches": {"only": filter_branch},
            # Raw string so the regex needs no extra escaping.
            "tags": {"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"},
        }
    return {f"binary_{os_type}_{btype}": params}
def gen_filter_branch_tree(*branches, tags_list=None):
    """Return a CircleCI "filters" dict limiting a job to the given branches.

    Args:
        branches: branch names (or regex strings) allowed to run the job.
        tags_list: optional tag pattern (or list of patterns) for the
            "tags" filter.
    """
    # list(branches) is the idiomatic spelling of [b for b in branches].
    filter_dict = {"branches": {"only": list(branches)}}
    if tags_list is not None:
        filter_dict["tags"] = {"only": tags_list}
    return filter_dict
def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):
    """Build the upload workflow entry paired with a binary build job."""
    job = {
        "name": f"{base_workflow_name}_upload",
        "context": "org-member",
        "requires": [base_workflow_name],
    }
    if btype == "wheel":
        # macOS wheels go to the bucket root; other wheels are placed under
        # a per-compute-platform subfolder such as "cu116/".
        job["subfolder"] = "" if os_type == "macos" else f"{cu_version}/"
    if filter_branch is not None:
        job["filters"] = {
            "branches": {"only": filter_branch},
            # Raw string so the regex needs no extra escaping.
            "tags": {"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"},
        }
    return {f"binary_{btype}_upload": job}
def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type):
    """Build a smoke-test workflow entry that depends on the upload job."""
    job = {
        "name": f"{base_workflow_name}_smoke_test_{pydistro}",
        # Smoke tests run against the package that was just uploaded.
        "requires": [f"{base_workflow_name}_upload"],
        "python_version": python_version,
    }
    if filter_branch:
        job["filters"] = gen_filter_branch_tree(filter_branch)
    return {f"smoke_test_{os_type}_{pydistro}": job}
def indent(indentation, data_list):
    """Dump data_list as block-style YAML and indent every continuation line
    by ``indentation`` spaces (the first line is indented by the template)."""
    rendered = yaml.dump(data_list, default_flow_style=False)
    separator = "\n" + " " * indentation
    return separator.join(rendered.splitlines())
def unittest_workflows(indentation=6):
    """Generate unit-test jobs for every OS/device/Python combination.

    Args:
        indentation: spaces of indentation for the rendered YAML.
    """
    jobs = []
    for os_type in ["linux", "windows", "macos"]:
        for device_type in ["cpu", "gpu"]:
            # There are no macOS GPU runners.
            if os_type == "macos" and device_type == "gpu":
                continue
            # (The previous enumerate() index was never used.)
            for python_version in PYTHON_VERSIONS:
                job = {
                    "name": f"unittest_{os_type}_{device_type}_py{python_version}",
                    "python_version": python_version,
                }
                if device_type == "gpu":
                    # Only Python 3.8 GPU jobs run on every branch; the rest
                    # are restricted to main/nightly.
                    if python_version != "3.8":
                        job["filters"] = gen_filter_branch_tree("main", "nightly")
                    job["cu_version"] = "cu116"
                else:
                    job["cu_version"] = "cpu"
                jobs.append({f"unittest_{os_type}_{device_type}": job})
    return indent(indentation, jobs)
def cmake_workflows(indentation=6):
    """Generate the CMake build jobs: CPU on all OSes, GPU except macOS."""
    jobs = []
    python_version = "3.8"
    for os_type in ["linux", "windows", "macos"]:
        # Skip OSX CUDA
        device_types = ["cpu"] if os_type == "macos" else ["cpu", "gpu"]
        for device in device_types:
            job = {
                "name": f"cmake_{os_type}_{device}",
                "python_version": python_version,
                "cu_version": "cu116" if device == "gpu" else "cpu",
            }
            if device == "gpu" and os_type == "linux":
                job["wheel_docker_image"] = "pytorch/manylinux-cuda116"
            jobs.append({f"cmake_{os_type}_{device}": job})
    return indent(indentation, jobs)
def ios_workflows(indentation=6, nightly=False):
    """Generate the iOS build jobs (x86_64 simulator and arm64 device).

    When nightly=True, every build job is filtered to the "nightly" branch
    and one upload job depending on both builds is appended.
    """
    jobs = []
    build_job_names = []
    name_prefix = "nightly_" if nightly else ""
    env_prefix = "nightly-" if nightly else ""
    for arch, platform in [("x86_64", "SIMULATOR"), ("arm64", "OS")]:
        name = f"{name_prefix}binary_libtorchvision_ops_ios_12.0.0_{arch}"
        build_job_names.append(name)
        build_job = {
            "build_environment": f"{env_prefix}binary-libtorchvision_ops-ios-12.0.0-{arch}",
            "ios_arch": arch,
            "ios_platform": platform,
            "name": name,
        }
        if nightly:
            build_job["filters"] = gen_filter_branch_tree("nightly")
        jobs.append({"binary_ios_build": build_job})
    if nightly:
        # The upload runs once, after both architecture builds succeed.
        upload_job = {
            "build_environment": f"{env_prefix}binary-libtorchvision_ops-ios-12.0.0-upload",
            "context": "org-member",
            "filters": gen_filter_branch_tree("nightly"),
            "requires": build_job_names,
        }
        jobs.append({"binary_ios_upload": upload_job})
    return indent(indentation, jobs)
def android_workflows(indentation=6, nightly=False):
    """Generate the Android job: a plain build, or (nightly) an upload job.

    NOTE(review): on the nightly path only the upload job is emitted —
    build_job and build_job_names are constructed but never used there, and
    unlike ios_workflows the upload job declares no "requires"; confirm
    this asymmetry is intentional.
    """
    jobs = []
    build_job_names = []
    name_prefix = "nightly_" if nightly else ""
    env_prefix = "nightly-" if nightly else ""
    name = f"{name_prefix}binary_libtorchvision_ops_android"
    build_job_names.append(name)
    build_job = {
        "build_environment": f"{env_prefix}binary-libtorchvision_ops-android",
        "name": name,
    }
    if nightly:
        upload_job = {
            "build_environment": f"{env_prefix}binary-libtorchvision_ops-android-upload",
            "context": "org-member",
            "filters": gen_filter_branch_tree("nightly"),
            "name": f"{name_prefix}binary_libtorchvision_ops_android_upload",
        }
        jobs.append({"binary_android_upload": upload_job})
    else:
        jobs.append({"binary_android_build": build_job})
    return indent(indentation, jobs)
if __name__ == "__main__":
    # Render config.yml from the config.yml.in Jinja template in the same
    # directory, exposing the workflow-generator functions to the template.
    d = os.path.dirname(__file__)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(d),
        lstrip_blocks=True,
        autoescape=select_autoescape(enabled_extensions=("html", "xml")),
        keep_trailing_newline=True,
    )
    with open(os.path.join(d, "config.yml"), "w") as f:
        f.write(
            env.get_template("config.yml.in").render(
                build_workflows=build_workflows,
                unittest_workflows=unittest_workflows,
                cmake_workflows=cmake_workflows,
                ios_workflows=ios_workflows,
                android_workflows=android_workflows,
            )
        )
# this Dockerfile is for torchvision smoke test, it will be created periodically via CI system
# if you need to do it locally, follow below steps once you have Docker installed
# assuming you're within the directory where this Dockerfile located
# $ docker build . -t torchvision/smoketest
# if you want to push to aws ecr, make sure you have the rights to write to ECR, then run
# $ eval $(aws ecr get-login --region us-east-1 --no-include-email)
# $ export MYTAG=localbuild ## you can choose whatever tag you like
# $ docker tag torchvision/smoketest 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchvision/smoke_test:${MYTAG}
# $ docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchvision/smoke_test:${MYTAG}
FROM ubuntu:latest

# Install Miniconda into /usr/local (-bfp), then remove the download tools
# and caches to keep the image small.
RUN apt-get -qq update && apt-get -qq -y install curl bzip2 libsox-fmt-all \
    && curl -sSL https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -o /tmp/miniconda.sh \
    && bash /tmp/miniconda.sh -bfp /usr/local \
    && rm -rf /tmp/miniconda.sh \
    && conda install -y python=3 \
    && conda update conda \
    && apt-get -qq -y remove curl bzip2 \
    && apt-get -qq -y autoremove \
    && apt-get autoclean \
    && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log \
    && conda clean --all --yes

# Conda was installed with prefix /usr/local (see -bfp above); the previous
# "/opt/conda/bin" PATH entry pointed at a directory that does not exist.
ENV PATH /usr/local/bin:$PATH

# One named environment per Python version exercised by the smoke tests.
RUN conda create -y --name python3.7 python=3.7
RUN conda create -y --name python3.8 python=3.8
RUN conda create -y --name python3.9 python=3.9
RUN conda create -y --name python3.10 python=3.10

SHELL [ "/bin/bash", "-c" ]
RUN echo "source /usr/local/etc/profile.d/conda.sh" >> ~/.bashrc
CMD [ "/bin/bash"]
#!/bin/bash
# Build the torchvision Android artifacts on CircleCI and archive the
# resulting AAR/APK files under ~/workspace/artifacts.
set -ex -o pipefail
echo "DIR: $(pwd)"
echo "ANDROID_HOME=${ANDROID_HOME}"
echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}"
echo "JAVA_HOME=${JAVA_HOME}"
WORKSPACE=/home/circleci/workspace
VISION_ANDROID=/home/circleci/project/android
. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh
GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties
rm -f "$GRADLE_LOCAL_PROPERTIES"
echo "sdk.dir=${ANDROID_HOME}" >> "$GRADLE_LOCAL_PROPERTIES"
echo "ndk.dir=${ANDROID_NDK_HOME}" >> "$GRADLE_LOCAL_PROPERTIES"
echo "GRADLE_PATH $GRADLE_PATH"
echo "GRADLE_HOME $GRADLE_HOME"
# NOTE: gradle failures are deliberately ignored ("|| true") so the
# artifact-archiving steps below still run; the tar steps fail instead if
# nothing was produced.
${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p "${VISION_ANDROID}" assemble || true
mkdir -p ~/workspace/artifacts
# Quote the -name patterns so the shell cannot glob-expand them against
# files in the current directory before find sees them (SC2061).
find . -type f -name '*aar' -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz
find . -type f -name '*apk' -print | xargs tar cfvz ~/workspace/artifacts/artifacts-apks.tgz
#!/bin/bash
# Publish the torchvision Android archives (ops:uploadArchives) and archive
# the built AARs under ~/workspace/artifacts.
set -ex -o pipefail
echo "DIR: $(pwd)"
echo "ANDROID_HOME=${ANDROID_HOME}"
echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}"
echo "JAVA_HOME=${JAVA_HOME}"
WORKSPACE=/home/circleci/workspace
VISION_ANDROID=/home/circleci/project/android
. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh
GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties
rm -f "$GRADLE_LOCAL_PROPERTIES"
GRADLE_PROPERTIES=/home/circleci/project/android/gradle.properties
echo "sdk.dir=${ANDROID_HOME}" >> "$GRADLE_LOCAL_PROPERTIES"
echo "ndk.dir=${ANDROID_NDK_HOME}" >> "$GRADLE_LOCAL_PROPERTIES"
# Disable command tracing while writing credentials: under "set -x" every
# echo below — including the expanded password values — would otherwise be
# printed into the public CI log.
set +x
echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> "$GRADLE_PROPERTIES"
echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> "$GRADLE_PROPERTIES"
echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> "$GRADLE_PROPERTIES"
echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> "$GRADLE_PROPERTIES"
echo "signing.keyId=${ANDROID_SIGN_KEY}" >> "$GRADLE_PROPERTIES"
echo "signing.password=${ANDROID_SIGN_PASS}" >> "$GRADLE_PROPERTIES"
set -x
# (was: cat … | grep — grep can read the file directly)
grep VERSION /home/circleci/project/android/gradle.properties
${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p "${VISION_ANDROID}" ops:uploadArchives
mkdir -p ~/workspace/artifacts
# Quote the -name pattern so the shell cannot glob-expand it (SC2061).
find . -type f -name '*aar' -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz
#!/bin/bash
# Download Gradle into /opt/gradle and export GRADLE_HOME / GRADLE_PATH for
# the caller. This script is meant to be sourced, not executed.
set -ex
_https_amazon_aws=https://ossci-android.s3.amazonaws.com
GRADLE_VERSION=6.8.3
_gradle_home=/opt/gradle
# BUG FIX: the previous line removed "$gradle_home" — an unset variable —
# so stale installs were never cleaned up. Use the variable actually defined
# above ("$_gradle_home").
sudo rm -rf "$_gradle_home"
sudo mkdir -p "$_gradle_home"
curl --silent --output /tmp/gradle.zip --retry 3 "$_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip"
sudo unzip -q /tmp/gradle.zip -d "$_gradle_home"
rm /tmp/gradle.zip
sudo chmod -R 777 "$_gradle_home"
export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION
export GRADLE_PATH=${GRADLE_HOME}/bin/gradle
#!/bin/bash
# Build libtorchvision_ops.a for one iOS architecture (IOS_ARCH from env)
# against the nightly pytorch iOS build, and stash it under ${WORKSPACE}/ios.
set -ex -o pipefail
echo ""
echo "DIR: $(pwd)"
WORKSPACE=/Users/distiller/workspace
PROJ_ROOT_IOS=/Users/distiller/project/ios
PYTORCH_IOS_NIGHTLY_NAME=libtorch_ios_nightly_build.zip
export TCLLIBPATH="/usr/local/lib"
# install conda
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/conda.sh
/bin/bash ~/conda.sh -b -p ~/anaconda
# BUG FIX: "~" does not expand inside double quotes, so the previous
# PATH="~/anaconda/bin:..." entry was a literal tilde; use $HOME instead.
export PATH="$HOME/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate
# install dependencies
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions wget --yes
conda install -c conda-forge valgrind --yes
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
# sync submodules
cd "${PROJ_ROOT_IOS}"
git submodule sync
git submodule update --init --recursive
# download pytorch-iOS nightly build and unzip it
mkdir -p "${PROJ_ROOT_IOS}/lib"
mkdir -p "${PROJ_ROOT_IOS}/build"
mkdir -p "${PROJ_ROOT_IOS}/pytorch"
TORCH_ROOT="${PROJ_ROOT_IOS}/pytorch"
cd "${TORCH_ROOT}"
wget "https://ossci-ios-build.s3.amazonaws.com/${PYTORCH_IOS_NIGHTLY_NAME}"
mkdir -p ./build_ios
unzip -d ./build_ios "./${PYTORCH_IOS_NIGHTLY_NAME}"
LIBTORCH_HEADER_ROOT="${TORCH_ROOT}/build_ios/install/include"
cd "${PROJ_ROOT_IOS}"
IOS_ARCH=${IOS_ARCH} LIBTORCH_HEADER_ROOT=${LIBTORCH_HEADER_ROOT} ./build_ios.sh
rm -rf "${TORCH_ROOT}"
# store the binary
DEST_DIR=${WORKSPACE}/ios/${IOS_ARCH}
mkdir -p "${DEST_DIR}"
cp "${PROJ_ROOT_IOS}/lib/"*.a "${DEST_DIR}"
#!/bin/bash
# Combine the per-arch iOS static libraries into one FAT binary, zip it with
# the license, and upload the archive to the ossci-ios-build S3 bucket.
set -ex -o pipefail
echo ""
echo "DIR: $(pwd)"
WORKSPACE=/Users/distiller/workspace
PROJ_ROOT=/Users/distiller/project
ARTIFACTS_DIR=${WORKSPACE}/ios
ls "${ARTIFACTS_DIR}"
ZIP_DIR=${WORKSPACE}/zip
mkdir -p "${ZIP_DIR}/install/lib"
# build a FAT binary (fixes "bianry" typo)
cd "${ZIP_DIR}/install/lib"
libs=("${ARTIFACTS_DIR}/x86_64/libtorchvision_ops.a" "${ARTIFACTS_DIR}/arm64/libtorchvision_ops.a")
lipo -create "${libs[@]}" -o "${ZIP_DIR}/install/lib/libtorchvision_ops.a"
lipo -i "${ZIP_DIR}/install/lib/"*.a
# copy the license
cp "${PROJ_ROOT}/LICENSE" "${ZIP_DIR}/"
# zip the library
ZIPFILE=libtorchvision_ops_ios_nightly_build.zip
cd "${ZIP_DIR}"
# version.txt carries the build timestamp (for testing archive freshness)
touch version.txt
date +%s > version.txt
zip -r "${ZIPFILE}" install version.txt LICENSE
# upload to aws
# Install conda then 'conda install' awscli
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/conda.sh
/bin/bash ~/conda.sh -b -p ~/anaconda
# BUG FIX: "~" does not expand inside double quotes; use $HOME instead.
export PATH="$HOME/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate
conda install -c conda-forge awscli --yes
# Keep the credentials out of the xtrace log.
set +x
export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD}
export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD}
set -x
aws s3 cp "${ZIPFILE}" s3://ossci-ios-build/ --acl public-read
# Conda test environment for the unittest CI jobs.
# PyTorch and torchvision are intentionally absent — they are installed by a
# separate (uncached) step.
# NOTE: the nested "pip:" dependencies must be indented below the "pip:" key;
# the extracted view showed them flat, which is not valid YAML.
channels:
  - pytorch
  - defaults
dependencies:
  - pytest
  - pytest-cov
  - pytest-mock
  - pip
  - libpng
  - jpeg
  - ca-certificates
  - h5py
  - pip:
      - future
      - scipy
      - av
#!/usr/bin/env bash
# Install the nightly PyTorch build into ./env, then install torchvision from
# source in develop mode. Expects CU_VERSION and UPLOAD_CHANNEL in the
# environment and a conda install at ./conda (created by setup_env.sh).
unset PYTORCH_VERSION
# For unittest, nightly PyTorch is used as the following section,
# so no need to set PYTORCH_VERSION.
# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config.
set -e
eval "$(./conda/bin/conda shell.bash hook)"
conda activate ./env
if [ "${CU_VERSION:-}" == cpu ] ; then
cudatoolkit="cpuonly"
version="cpu"
else
# Derive "X.Y" from "cuXY" / "cuXYZ": e.g. cu92 -> 9.2, cu116 -> 11.6.
if [[ ${#CU_VERSION} -eq 4 ]]; then
CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}"
elif [[ ${#CU_VERSION} -eq 5 ]]; then
CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}"
fi
echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION: ${CU_VERSION} "
version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")"
cudatoolkit="pytorch-cuda=${version}"
fi
case "$(uname -s)" in
Darwin*) os=MacOSX;;
*) os=Linux
esac
printf "Installing PyTorch with %s\n" "${cudatoolkit}"
if [ "${os}" == "MacOSX" ]; then
# macOS: no nvidia channel and no build-string pin.
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}"
else
# Pin the build string so the package matching $version is selected.
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}"
fi
printf "* Installing torchvision\n"
python setup.py develop
#!/usr/bin/env bash
# Post-process hook for the unittest job: activate the test environment.
# Currently a no-op beyond activation.
set -e
eval "$(./conda/bin/conda shell.bash hook)"
conda activate ./env
#!/usr/bin/env bash
# Run the torchvision unit-test suite inside the prepared ./env environment.
set -e
eval "$(./conda/bin/conda shell.bash hook)"
conda activate ./env
# Dump environment details into the CI log to ease debugging.
python -m torch.utils.collect_env
# --durations 20: report the 20 slowest tests in the summary.
pytest --junitxml=test-results/junit.xml -v --durations 20
#!/usr/bin/env bash
# This script is for setting up environment in which unit test is ran.
# To speed up the CI time, the resulting environment is cached.
#
# Do not install PyTorch and torchvision here, otherwise they also get cached.
set -e
this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Avoid error: "fatal: unsafe repository"
git config --global --add safe.directory '*'
root_dir="$(git rev-parse --show-toplevel)"
conda_dir="${root_dir}/conda"
env_dir="${root_dir}/env"
cd "${root_dir}"
case "$(uname -s)" in
    Darwin*) os=MacOSX;;
    *) os=Linux
esac
# 1. Install conda at ./conda
if [ ! -d "${conda_dir}" ]; then
    printf "* Installing conda\n"
    # Fetch over HTTPS — the installer was previously downloaded via plain
    # HTTP, which allows tampering in transit.
    wget -O miniconda.sh "https://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh"
    bash ./miniconda.sh -b -f -p "${conda_dir}"
fi
eval "$(${conda_dir}/bin/conda shell.bash hook)"
# 2. Create test environment at ./env
if [ ! -d "${env_dir}" ]; then
    printf "* Creating a test environment\n"
    conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION"
fi
conda activate "${env_dir}"
# 3. Install Conda dependencies
printf "* Installing dependencies (except PyTorch)\n"
# Python 3.9 allows a newer ffmpeg; everything else is pinned to exactly 4.2.
# NOTE(review): verify the pin is still required.
FFMPEG_PIN="=4.2"
if [[ "${PYTHON_VERSION}" = "3.9" ]]; then
    FFMPEG_PIN=">=4.2"
fi
conda install -y -c pytorch "ffmpeg${FFMPEG_PIN}"
conda env update --file "${this_dir}/environment.yml" --prune
# Conda test environment for the Windows unittest CI jobs.
# PyTorch and torchvision are intentionally absent — they are installed by a
# separate (uncached) step.
# NOTE: the nested "pip:" dependencies must be indented below the "pip:" key;
# the extracted view showed them flat, which is not valid YAML.
channels:
  - pytorch
  - defaults
dependencies:
  - pytest
  - pytest-cov
  - pytest-mock
  - pip
  - libpng
  - jpeg
  - ca-certificates
  - hdf5
  - setuptools
  - pip:
      - future
      - scipy
      - av != 9.1.1
      - dataclasses
      - h5py
#!/usr/bin/env bash
# Windows variant of the unittest install step: install nightly PyTorch into
# ./env, verify CUDA is actually usable, then build torchvision in develop
# mode through the Visual C++ environment helper.
unset PYTORCH_VERSION
# For unittest, nightly PyTorch is used as the following section,
# so no need to set PYTORCH_VERSION.
# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config.
set -ex
this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')"
conda activate ./env
# TODO, refactor the below logic to make it easy to understand how to get correct cuda_version.
if [ "${CU_VERSION:-}" == cpu ] ; then
cudatoolkit="cpuonly"
version="cpu"
else
# Derive "X.Y" from "cuXY" / "cuXYZ": e.g. cu92 -> 9.2, cu116 -> 11.6.
if [[ ${#CU_VERSION} -eq 4 ]]; then
CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}"
elif [[ ${#CU_VERSION} -eq 5 ]]; then
CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}"
fi
# For CUDA 11.6/11.7 the conda package is named "pytorch-cuda" rather than
# "cudatoolkit".
cuda_toolkit_pckg="cudatoolkit"
if [[ $CUDA_VERSION == 11.6 || $CUDA_VERSION == 11.7 ]]; then
cuda_toolkit_pckg="pytorch-cuda"
fi
echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION"
version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")"
cudatoolkit="${cuda_toolkit_pckg}=${version}"
fi
printf "Installing PyTorch with %s\n" "${cudatoolkit}"
conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}"
torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())")
echo torch.cuda.is_available is $torch_cuda
# Fail fast when a CUDA build was requested but CUDA is unusable at runtime.
if [ ! -z "${CUDA_VERSION:-}" ] ; then
if [ "$torch_cuda" == "False" ]; then
echo "torch with cuda installed but torch.cuda.is_available() is False"
exit 1
fi
fi
source "$this_dir/set_cuda_envs.sh"
printf "* Installing torchvision\n"
"$this_dir/vc_env_helper.bat" python setup.py develop
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
#!/usr/bin/env bash
# Windows post-process hook: activate the test environment.
# Currently a no-op beyond activation.
set -e
eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')"
conda activate ./env
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment