Unverified Commit cbbd685a authored by Yineng Zhang, committed by GitHub

chore: use torch 2.8 stable (#8880)

parent 78aad910
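
This commit drops the test/ path segment from the PyTorch wheel index URLs, switching the build from the release-candidate channel to the stable channel now that torch 2.8.0 has a stable release. A minimal sketch of the difference, using cu128 purely as an illustrative CUDA tag:

# Before: release-candidate wheels from the test index
pip install torch==2.8.0 --index-url https://download.pytorch.org/whl/test/cu128
# After: stable wheels from the main index
pip install torch==2.8.0 --index-url https://download.pytorch.org/whl/cu128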
@@ -69,7 +69,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel html5li
         12.9.1) CUINDEX=129 ;; \
         *) echo "Unsupported CUDA version: $CUDA_VERSION" && exit 1 ;; \
     esac \
-    && python3 -m pip install --no-cache-dir -e "python[${BUILD_TYPE}]" --extra-index-url https://download.pytorch.org/whl/test/cu${CUINDEX} \
+    && python3 -m pip install --no-cache-dir -e "python[${BUILD_TYPE}]" --extra-index-url https://download.pytorch.org/whl/cu${CUINDEX} \
     && python3 -m pip install --no-cache-dir nvidia-nccl-cu12==2.27.6 --force-reinstall --no-deps \
     && if [ "$CUDA_VERSION" = "12.8.1" ]; then \
         python3 -m pip install --no-cache-dir https://github.com/sgl-project/whl/releases/download/v0.3.2/sgl_kernel-0.3.2+cu128-cp39-abi3-manylinux2014_x86_64.whl --force-reinstall --no-deps ; \
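
A quick way to confirm that the resulting image picked up the stable build — a sketch, assuming python3 and torch are importable in the image's default environment:

# Should report 2.8.0 and a CUDA runtime matching the selected ${CUINDEX}
python3 -c "import torch; print(torch.__version__, torch.version.cuda)"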
......
@@ -32,7 +32,7 @@
 rm -rf /usr/local/lib/python3.10/dist-packages/flashinfer*
 rm -rf /usr/local/lib/python3.10/dist-packages/sgl_kernel*
 # Install the main package
-pip install -e "python[dev]" --extra-index-url https://download.pytorch.org/whl/test/${CU_VERSION} --break-system-packages
+pip install -e "python[dev]" --extra-index-url https://download.pytorch.org/whl/${CU_VERSION} --break-system-packages
 # Show current packages
 pip list
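
Note that with the test/ segment removed, ${CU_VERSION} is still expected to carry the cuNNN form used by the index layout (an assumption inferred from the other hunks in this commit, not stated in this file):

# Hypothetical value; CU_VERSION is set earlier in the script and not shown in this diff
CU_VERSION=cu128
pip install -e "python[dev]" --extra-index-url https://download.pytorch.org/whl/${CU_VERSION} --break-system-packages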
......
@@ -23,13 +23,13 @@ fi
 if [ ${CUDA_VERSION} = "12.9" ]; then
     DOCKER_IMAGE="${BUILDER_NAME}:cuda${CUDA_VERSION}"
-    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/test/cu129"
+    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/cu129"
 elif [ ${CUDA_VERSION} = "12.8" ]; then
     DOCKER_IMAGE="${BUILDER_NAME}:cuda${CUDA_VERSION}"
-    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/test/cu128"
+    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/cu128"
 else
     DOCKER_IMAGE="${BUILDER_NAME}:cuda${CUDA_VERSION}"
-    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/test/cu126"
+    TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/cu126"
 fi
 docker run --rm \
......
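
The docker run invocation is truncated in this diff, so exactly how TORCH_INSTALL is consumed is not visible; a hedged sketch of the typical pattern would be:

# Assumed usage only; the script's real docker run arguments are not shown above
docker run --rm "${DOCKER_IMAGE}" bash -c "${TORCH_INSTALL}"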