"...git@developer.sourcefind.cn:yangql/composable_kernel.git" did not exist on "500fa9951297c033a9c4c1d300b03895a46528d2"
Unverified Commit ba2c1856 authored by Lei Wang, committed by GitHub

[Dependency] Add torch-c-dlpack-ext to project requirements (#1403)



* [Dependency] Add torch-c-dlpack-ext to project requirements

* Added torch-c-dlpack-ext to both pyproject.toml and requirements.txt to provide prebuilt torch extensions, so that TVM FFI does not have to fall back to JIT compilation on first import (see the first sketch at the end of this message).

* [Build] Update manylinux images in project configuration

* Changed the manylinux image for x86_64 from "manylinux2014" (CentOS 7, glibc 2.17) to "manylinux_2_28" (AlmaLinux 8, glibc 2.28) in both pyproject.toml and the Dockerfile, matching the image already used for aarch64 and moving to a maintained, more recent baseline (see the second sketch at the end of this message).

* [Build] Update CUDA repository configuration in pyproject.toml

* Changed the package manager command from `yum-config-manager` to `dnf config-manager` for adding the CUDA repository, since the AlmaLinux 8 based manylinux_2_28 image uses dnf.

* fix

* [Build] Update CUDA repository to RHEL 8

* Changed the CUDA repository configuration in both pyproject.toml and the manylinux Dockerfile from the RHEL 7 repository to the RHEL 8 repository, matching the new AlmaLinux 8 based build image (the full setup is sketched at the end of this message).

* test: run out of space

* use cu130 to reduce size

* upd

* upd comment

* upd
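
For a rough check that the prebuilt extension is actually picked up, the first import of the FFI layer can be profiled in a fresh environment. This is only a sketch: it assumes the apache-tvm-ffi package is imported as `tvm_ffi`, and that a long first import indicates the JIT-compilation fallback.

```bash
# Hypothetical check: profile the first import with torch-c-dlpack-ext installed.
# Assumes apache-tvm-ffi's import name is `tvm_ffi`; adjust if it differs.
python -m venv /tmp/ffi-check && . /tmp/ffi-check/bin/activate
pip install torch apache-tvm-ffi torch-c-dlpack-ext
# -X importtime prints a per-module import-cost breakdown to stderr.
python -X importtime -c "import tvm_ffi" 2>&1 | tail -n 20
```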
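
Since manylinux_2_28 wheels require glibc 2.28 or newer on the installing host, a quick sanity check of the host and of a locally built wheel's platform tag could look like the sketch below; the wheel path is a placeholder, and `auditwheel` is the same tool already used in the repair step.

```bash
# Host glibc version; manylinux_2_28 wheels need glibc >= 2.28.
ldd --version | head -n 1
# Inspect which manylinux policy a built wheel satisfies
# (the path below is a placeholder for a locally built wheel).
pip install auditwheel
auditwheel show dist/tilelang-*.whl
```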
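
For reference, reproducing the RHEL 8 CUDA repository setup inside the manylinux_2_28 (AlmaLinux 8) x86_64 image would roughly follow the updated before-all script; CUDA 12.8 is used here to mirror the new default, and the package set is the one from the build config.

```bash
# Sketch of the CUDA setup inside quay.io/pypa/manylinux_2_28_x86_64,
# mirroring the updated before-all script (CUDA 12.8 is the new default).
dnf config-manager --add-repo \
    https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
v="12-8"
yum install -y "cuda-minimal-build-${v}" "cuda-driver-devel-${v}" \
    "cuda-nvrtc-devel-${v}" nvidia-driver-cuda-libs
yum clean all  # keep the container layer small
```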

---------
Co-authored-by: Your Name <wenji.yyc@alibaba-inc.com>
parent 08262bce
@@ -106,7 +106,7 @@ jobs:
     strategy:
       matrix:
         target:
-          - { runner: ubuntu-latest, toolkit: "CUDA-12.1" }
+          - { runner: ubuntu-latest, toolkit: "CUDA-12.8" }
           - { runner: ubuntu-24.04-arm, toolkit: "CUDA-12.8" }
           - { runner: macos-latest, toolkit: "Metal" }
         python-version:

-FROM quay.io/pypa/manylinux2014_x86_64 AS builder_amd64
+FROM quay.io/pypa/manylinux_2_28_x86_64 AS builder_amd64
-RUN yum-config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
+RUN dnf config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
-ARG CUDA_VERSION=12.1
+ARG CUDA_VERSION=12.8
 ENV CUDA_VERSION=${CUDA_VERSION}
 FROM quay.io/pypa/manylinux_2_28_aarch64 AS builder_arm64

@@ -32,6 +32,9 @@ dependencies = [
     # should be removed after our tvm's update.
     # See discussion in tilelang#1373 and apache/tvm-ffi#307
     "apache-tvm-ffi>=0.1.2",
+    # torch-c-dlpack-ext provides prebuilt torch extensions.
+    # Without it, TVM FFI may require JIT compilation on first import.
+    "torch-c-dlpack-ext",
     "cloudpickle",
     "ml-dtypes",
     "numpy>=1.23.5",
@@ -218,12 +221,10 @@ environment.PYTHONDEVMODE = "1"
 environment.PYTHONUNBUFFERED = "1"
 environment.PATH = "/usr/local/cuda/bin:$PATH"
 environment.LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
-# Pin to glibc 2.17 for x86 and 2.28 for aarch64 for now
-# TODO: upgrade to manylinux_2_28 at some time
-manylinux-x86_64-image = "manylinux2014"  # CentOS 7
-manylinux-aarch64-image = "manylinux_2_28"  # AlmaLinux 8
+manylinux-x86_64-image = "manylinux_2_28"  # AlmaLinux 8
+manylinux-aarch64-image = "manylinux_2_28"  # AlmaLinux 8
 # Install CUDA runtime and stub driver library
-# manylinux_2_28 uses gcc 14, which needs CUDA 12.8
+# manylinux_2_28 uses gcc 14, which needs CUDA >=12.8
 before-all = """
 set -eux
@@ -232,8 +233,8 @@ uname -a
 case "$(uname -m)" in
 "x86_64")
-    DEFAULT_CUDA_VERSION="12.1"
-    yum-config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
+    DEFAULT_CUDA_VERSION="12.8"
+    dnf config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
     ;;
 "aarch64")
     DEFAULT_CUDA_VERSION="12.8"
@@ -247,6 +248,7 @@ esac
 cudaver="$(echo "${CUDA_VERSION:-$DEFAULT_CUDA_VERSION}" | cut -d '.' -f-2)"
 v="${cudaver//./-}"
 yum install -y "cuda-minimal-build-${v}" "cuda-driver-devel-${v}" "cuda-nvrtc-devel-${v}" nvidia-driver-cuda-libs
+yum clean all
 """
 repair-wheel-command = [
     "auditwheel -v repair --exclude libtvm_ffi.so --exclude libcuda.so.1 --exclude '/usr/local/cuda*' -w {dest_dir} {wheel}",
@@ -261,7 +263,8 @@ repair-wheel-command = [
 [[tool.cibuildwheel.overrides]]
 select = "*linux*x86_64*"
-# CentOS 7 is too old to run import test. Do wheel installation test only.
-test-command = [
-    "echo 'Wheel is installed successfully'",
+# x86_64 runners in GitHub Actions have limited storage,
+# pre-install torch without caching to reduce disk usage during install tilelang.
+before-test = [
+    "pip install torch --no-cache-dir",
 ]

 # Runtime requirements
 apache-tvm-ffi>=0.1.2
+torch-c-dlpack-ext
 cloudpickle
 ml-dtypes
 numpy>=1.23.5