Unverified commit 6b2e0a08, authored by Andrey Talman, committed by GitHub

CUDA 11.3 remove (#6567)

Fix cuda 11.6 and 11.7

Check nvjpeg from CONDA Path

More cuda11.6

Trying to fix the build

Trying to fix the build

Removing nvjpeg logic

Add NvToolsExt dll path

Fix lint
parent 355b2788
@@ -853,7 +853,7 @@ jobs:
     executor:
       name: windows-gpu
     environment:
-      CUDA_VERSION: "11.3"
+      CUDA_VERSION: "11.6"
       PYTHON_VERSION: << parameters.python_version >>
     steps:
       - checkout
@@ -969,6 +969,7 @@ jobs:
           command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh
       - run:
           name: Build torchvision C++ distribution and test
+          no_output_timeout: 30m
           command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh
   cmake_macos_cpu:
@@ -1006,6 +1007,9 @@ jobs:
     steps:
       - checkout_merge
       - designate_upload_channel
+      - run:
+          name: Update CUDA driver
+          command: packaging/windows/internal/driver_update.bat
       - run:
           command: |
             set -ex
@@ -32,8 +32,8 @@ def build_workflows(prefix="", filter_branch=None, upload=False, indentation=6,
     for os_type in ["linux", "macos", "win"]:
         python_versions = PYTHON_VERSIONS
         cu_versions_dict = {
-            "linux": ["cpu", "cu102", "cu113", "cu116", "cu117", "rocm5.1.1", "rocm5.2"],
-            "win": ["cpu", "cu113", "cu116", "cu117"],
+            "linux": ["cpu", "cu102", "cu116", "cu117", "rocm5.1.1", "rocm5.2"],
+            "win": ["cpu", "cu116", "cu117"],
             "macos": ["cpu"],
         }
         cu_versions = cu_versions_dict[os_type]
@@ -123,7 +123,6 @@ def upload_doc_job(filter_branch):
 manylinux_images = {
     "cu102": "pytorch/manylinux-cuda102",
-    "cu113": "pytorch/manylinux-cuda113",
     "cu116": "pytorch/manylinux-cuda116",
     "cu117": "pytorch/manylinux-cuda117",
 }
@@ -266,9 +265,9 @@ def cmake_workflows(indentation=6):
         for device in device_types:
             job = {"name": f"cmake_{os_type}_{device}", "python_version": python_version}
-            job["cu_version"] = "cu113" if device == "gpu" else "cpu"
+            job["cu_version"] = "cu116" if device == "gpu" else "cpu"
             if device == "gpu" and os_type == "linux":
-                job["wheel_docker_image"] = "pytorch/manylinux-cuda113"
+                job["wheel_docker_image"] = "pytorch/manylinux-cuda116"
             jobs.append({f"cmake_{os_type}_{device}": job})
     return indent(indentation, jobs)
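
For context, here is a minimal sketch of how a matrix script like .circleci/regenerate.py expands these per-OS lists into build jobs. It is not the real script; the PYTHON_VERSIONS values and the build_matrix helper are assumptions for illustration. It shows why deleting "cu113" from the lists is enough to stop generating every CUDA 11.3 wheel job.

# Illustrative sketch only, not the actual regenerate.py.
PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]  # assumed for the example

CU_VERSIONS = {
    "linux": ["cpu", "cu102", "cu116", "cu117", "rocm5.1.1", "rocm5.2"],
    "win": ["cpu", "cu116", "cu117"],
    "macos": ["cpu"],
}

def build_matrix():
    # Each (os, python, cu_version) combination becomes one binary build job;
    # removing "cu113" from CU_VERSIONS is all it takes to drop those jobs.
    return [
        {"os": os_type, "python_version": py, "cu_version": cu}
        for os_type, cu_list in CU_VERSIONS.items()
        for py in PYTHON_VERSIONS
        for cu in cu_list
    ]

if __name__ == "__main__":
    jobs = build_matrix()
    assert not any(job["cu_version"] == "cu113" for job in jobs)
    print(f"{len(jobs)} binary build jobs")

The real script additionally filters by branch and attaches docker images from manylinux_images, which is why the cu113 entry is deleted from that mapping as well.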
@@ -24,8 +24,8 @@ else
 fi
 cuda_toolkit_pckg="cudatoolkit"
-if [[ "$CU_VERSION" == cu116 || "$CU_VERSION" == cu117 ]]; then
-    cuda_toolkit_pckg="cuda"
+if [[ $CUDA_VERSION == 11.6 || $CUDA_VERSION == 11.7 ]]; then
+    cuda_toolkit_pckg="pytorch-cuda"
 fi
 echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION"
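
The hunk above switches which conda package supplies the toolkit for CUDA 11.6/11.7 builds, and the condition now keys off CUDA_VERSION (11.6/11.7) rather than CU_VERSION (cu116/cu117). A hedged Python restatement of that branch; conda_cuda_package is a made-up helper for illustration, not part of torchvision:

# Sketch only: the same decision the bash snippet above makes.
def conda_cuda_package(cuda_version):
    """Return the conda package that supplies the CUDA toolkit for a build."""
    if cuda_version in ("11.6", "11.7"):
        return "pytorch-cuda"   # newer builds pull the pytorch-cuda metapackage
    return "cudatoolkit"        # older CUDA builds keep using cudatoolkit

assert conda_cuda_package("11.6") == "pytorch-cuda"
assert conda_cuda_package("10.2") == "cudatoolkit"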
@@ -4,7 +4,7 @@ set -ex
 echo CU_VERSION is "${CU_VERSION}"
 echo CUDA_VERSION is "${CUDA_VERSION}"
 # Currenly, CU_VERSION and CUDA_VERSION are not consistent.
 # to understand this code, see https://github.com/pytorch/vision/issues/4443
 version="cpu"
 if [[ ! -z "${CUDA_VERSION}" ]] ; then
@@ -97,7 +97,7 @@ if [[ "$OSTYPE" == "msys" ]]; then
     "$script_dir/windows/internal/vc_env_helper.bat" "$script_dir/windows/internal/build_frcnn.bat" $PARALLELISM
     mv fasterrcnn_resnet50_fpn.pt Release
     cd Release
-    export PATH=$(cygpath -w "C:/Program Files (x86)/torchvision/bin"):$(cygpath -w $TORCH_PATH)/lib:$PATH
+    export PATH=$(cygpath -w "C:/Program Files/NVIDIA Corporation/NvToolsExt/bin/x64"):$(cygpath -w "C:/Program Files (x86)/torchvision/bin"):$(cygpath -w $TORCH_PATH)/lib:$PATH
 else
     make -j$PARALLELISM
 fi
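
The PATH change above exists so the Windows C++ test binaries can locate the NVTX (NvToolsExt) DLL at load time. Below is a small Python sketch of the same idea; the prepend_to_path helper and the exact DLL name mentioned in the comment are assumptions for illustration only.

import os

# Sketch: prepend directories to PATH so dependent DLLs are found first on
# Windows. Mirrors the export PATH=... line in the diff above; the DLL name
# nvToolsExt64_1.dll is assumed here, not taken from the commit.
def prepend_to_path(*directories):
    existing = os.environ.get("PATH", "")
    os.environ["PATH"] = os.pathsep.join([*directories, existing])

prepend_to_path(
    r"C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64",
    r"C:\Program Files (x86)\torchvision\bin",
)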
@@ -62,14 +62,6 @@ setup_cuda() {
       fi
       export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
       ;;
-    cu113)
-      if [[ "$OSTYPE" == "msys" ]]; then
-        export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.3"
-      else
-        export CUDA_HOME=/usr/local/cuda-11.3/
-      fi
-      export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6"
-      ;;
     cu102)
       if [[ "$OSTYPE" == "msys" ]]; then
         export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2"
@@ -275,9 +267,6 @@ setup_conda_cudatoolkit_constraint() {
     cu116)
       export CONDA_CUDATOOLKIT_CONSTRAINT="- pytorch-cuda=11.6 # [not osx]"
       ;;
-    cu113)
-      export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.3,<11.4 # [not osx]"
-      ;;
     cu102)
       export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.2,<10.3 # [not osx]"
       ;;
@@ -307,9 +296,6 @@ setup_conda_cudatoolkit_plain_constraint() {
     cu116)
       export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda=11.6"
       ;;
-    cu113)
-      export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=11.3"
-      ;;
     cu102)
       export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=10.2"
       ;;
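
Taken together, the pkg_helpers.sh hunks remove every cu113 branch from the CU_VERSION case statements. A table-driven sketch of what remains, with values copied from the diff above; the conda_constraint lookup function is hypothetical and other CU_VERSION entries in the real file are omitted.

import sys

# Conda constraints still present after the change, as shown in the diff.
CONDA_CONSTRAINTS = {
    "cu116": "pytorch-cuda=11.6",
    "cu102": "cudatoolkit=10.2",
}

def conda_constraint(cu_version):
    # With cu113 gone from the table, asking for it fails loudly instead of
    # silently producing a CUDA 11.3 build.
    try:
        return CONDA_CONSTRAINTS[cu_version]
    except KeyError:
        sys.exit(f"Unrecognized CU_VERSION={cu_version}")

print(conda_constraint("cu116"))   # -> pytorch-cuda=11.6
# conda_constraint("cu113") would now exit with an error.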