"vscode:/vscode.git/clone" did not exist on "03ffd0a02251e10c1aa14fca8cb0ab1e4e40b886"
Unverified Commit 354e9aff authored by Yichen Yan's avatar Yichen Yan Committed by GitHub
Browse files

[Release] Unify local build scripts to use `cibuildwheel` and reduce size of sdist (#1171)

* update exclude in sdist

* reuse cibw workflow in maint

* update

* fix

* fmt

* upload artifacts for [Release] PRs

* dot-prefix version file

* update
parent 055f8500
......@@ -44,12 +44,11 @@ jobs:
runs-on: macos-latest
timeout-minutes: 30
env:
NO_VERSION_LABEL: ${{ github.event_name == 'release' && 'OFF' || 'ON' }}
# NO_GIT_VERSION disables embedding the git commit hash in version metadata.
# NO_VERSION_LABEL disables embedding the toolchain / git commit hash in version metadata.
# Otherwise, the version of the SDist has a git hash suffix (e.g., 0.1.0+gitabcdef12),
# but the package built from the SDist has no way to get the git hash (it is not a git repo),
# leading to inconsistent versions between SDist and built packages (+gitabcdef12 vs. +gitunknown).
NO_GIT_VERSION: "ON"
NO_VERSION_LABEL: 'OFF'
steps:
- name: Checkout repository
......@@ -89,7 +88,7 @@ jobs:
- name: Upload SDist
# Not PR to save artifact storage, as SDist is only needed for releases.
if: github.event_name != 'pull_request'
if: github.event_name != 'pull_request' || contains(github.event.pull_request.title, '[Release]')
uses: actions/upload-artifact@v5
with:
name: sdist
......@@ -157,7 +156,7 @@ jobs:
- name: Upload wheels
# Not PR to save artifact storage, as wheels are only needed for releases.
if: github.event_name != 'pull_request'
if: github.event_name != 'pull_request' || contains(github.event.pull_request.title, '[Release]')
uses: actions/upload-artifact@v5
with:
name: wheels-${{ matrix.python-version }}-${{ runner.os }}-${{ runner.arch }}-${{ matrix.target.toolkit }}
......@@ -167,7 +166,7 @@ jobs:
list-artifacts:
name: List artifacts
# Not PR to save artifact storage, as artifacts are only needed for releases.
if: github.event_name != 'pull_request'
if: github.event_name != 'pull_request' || contains(github.event.pull_request.title, '[Release]')
runs-on: ubuntu-latest
needs: [build-sdist, build-wheels]
timeout-minutes: 15
......
......@@ -102,3 +102,6 @@ tilelang/jit/adapter/cython/.cycache
# CMake
cmake-build/
cmake-build-*/
# Git version for sdist
.git_commit.txt
# Reference: https://setuptools.pypa.io/en/latest/userguide/miscellaneous.html
# Include licenses
include VERSION
include LICENSE
include THIRDPARTYNOTICES.txt
# Version and dependency files
include version_provider.py
include requirements*.txt
include tilelang/jit/adapter/cython/cython_wrapper.pyx
# Include source files in SDist
include CMakeLists.txt
graft src
graft cmake
graft 3rdparty
# Include test suites in SDist
graft testing
graft examples
global-exclude .coverage .coverage.* coverage.xml coverage-*.xml coverage.*.xml
global-exclude .junit .junit.* junit.xml junit-*.xml junit.*.xml
# Exclude unneeded files and directories
prune .git
prune .github
prune */.git
prune */.github
prune 3rdparty/clang*
prune 3rdparty/llvm*
# Prune compiled files
prune */__pycache__
global-exclude *~ *.py[cod] *.so *.a *.dylib *.pxd *.dll *.lib *.o *.obj
./maint/scripts/docker_local_distribute.sh 2>&1 | tee docker_local_distribute.log
./maint/scripts/docker_pypi_distribute.sh 2>&1 | tee docker_pypi_distribute.log
#!/usr/bin/env bash
# Build tilelang distributions locally inside manylinux Docker images.
# For each supported architecture (amd64 and arm64 when buildx + QEMU are
# available, otherwise only the host architecture), this script:
#   1. builds the builder image from pypi.manylinux.Dockerfile,
#   2. runs maint/scripts/local_distribution.sh inside it with the repo
#      mounted at /tilelang,
#   3. moves the resulting dist/ directory to dist-local-<arch>.
set -euxo pipefail
IMAGE="tilelang-builder:manylinux"
# Map the host machine type to Docker's TARGETARCH naming (amd64/arm64).
HOST_UNAME=$(uname -m)
case "$HOST_UNAME" in
x86_64) TARGETARCH=amd64 ;;
aarch64|arm64) TARGETARCH=arm64 ;;
*) echo "Unsupported architecture: $HOST_UNAME" >&2; exit 1 ;;
esac
# Prefer buildx so both architectures can be built (cross-arch via QEMU).
if docker buildx version >/dev/null 2>&1; then
# Register binfmt handlers for cross-arch emulation; best-effort, so
# failures (e.g. no privileges) are ignored via `|| true`.
if docker info >/dev/null 2>&1; then
docker run --rm --privileged tonistiigi/binfmt --install amd64,arm64 >/dev/null 2>&1 || true
fi
# Reuse an existing docker-container builder named "multi", or create one.
if ! docker buildx inspect multi >/dev/null 2>&1; then
docker buildx create --name multi --driver docker-container --use >/dev/null 2>&1 || true
else
docker buildx use multi >/dev/null 2>&1 || true
fi
docker buildx inspect --bootstrap >/dev/null 2>&1 || true
for ARCH in amd64 arm64; do
TAG_PLATFORM="linux/${ARCH}"
TAG_IMAGE="${IMAGE}-${ARCH}"
# Build the per-arch builder image; --load imports it into the local
# Docker daemon so `docker run` below can use it.
docker buildx build \
--platform "${TAG_PLATFORM}" \
--build-arg TARGETARCH="${ARCH}" \
-f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" \
-t "${TAG_IMAGE}" \
--load \
.
# Run the actual distribution build inside the container with the
# repository mounted read-write at /tilelang.
script="sh maint/scripts/local_distribution.sh"
docker run --rm \
--platform "${TAG_PLATFORM}" \
-v "$(pwd):/tilelang" \
"${TAG_IMAGE}" \
/bin/bash -lc "$script"
# Rename per-arch output so the next loop iteration's dist/ is not mixed in.
if [ -d dist ]; then
mv -f dist "dist-local-${ARCH}"
fi
done
else
# No buildx available: fall back to a plain docker build for the host
# architecture only.
echo "docker buildx not found; building only host arch: ${TARGETARCH}" >&2
TAG_IMAGE="${IMAGE}-${TARGETARCH}"
TAG_PLATFORM="linux/${TARGETARCH}"
docker build \
--build-arg TARGETARCH="$TARGETARCH" \
-f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" \
-t "${TAG_IMAGE}" \
.
script="sh maint/scripts/local_distribution.sh"
docker run --rm \
--platform "${TAG_PLATFORM}" \
-v "$(pwd):/tilelang" \
"${TAG_IMAGE}" \
/bin/bash -lc "$script"
if [ -d dist ]; then
mv -f dist "dist-local-${TARGETARCH}"
fi
fi
# Build for local architecture
CIBW_BUILD='cp38-*' cibuildwheel .
#!/usr/bin/env bash
set -euxo pipefail
IMAGE="tilelang-builder:manylinux"
HOST_UNAME=$(uname -m)
case "$HOST_UNAME" in
x86_64) TARGETARCH=amd64 ;;
aarch64|arm64) TARGETARCH=arm64 ;;
*) echo "Unsupported architecture: $HOST_UNAME" >&2; exit 1 ;;
esac
if docker buildx version >/dev/null 2>&1; then
if docker info >/dev/null 2>&1; then
docker run --rm --privileged tonistiigi/binfmt --install amd64,arm64 >/dev/null 2>&1 || true
......@@ -21,50 +12,9 @@ if docker buildx version >/dev/null 2>&1; then
docker buildx use multi >/dev/null 2>&1 || true
fi
docker buildx inspect --bootstrap >/dev/null 2>&1 || true
for ARCH in amd64 arm64; do
TAG_PLATFORM="linux/${ARCH}"
TAG_IMAGE="${IMAGE}-${ARCH}"
docker buildx build \
--platform "${TAG_PLATFORM}" \
--build-arg TARGETARCH="${ARCH}" \
-f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" \
-t "${TAG_IMAGE}" \
--load \
.
script="sh maint/scripts/pypi_distribution.sh"
docker run --rm \
--platform "${TAG_PLATFORM}" \
-v "$(pwd):/tilelang" \
"${TAG_IMAGE}" \
/bin/bash -lc "$script"
if [ -d dist ]; then
mv -f dist "dist-pypi-${ARCH}"
fi
done
else
echo "docker buildx not found; building only host arch: ${TARGETARCH}" >&2
TAG_IMAGE="${IMAGE}-${TARGETARCH}"
TAG_PLATFORM="linux/${TARGETARCH}"
docker build \
--build-arg TARGETARCH="$TARGETARCH" \
-f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" \
-t "${TAG_IMAGE}" \
.
script="sh maint/scripts/pypi_distribution.sh"
docker run --rm \
--platform "${TAG_PLATFORM}" \
-v "$(pwd):/tilelang" \
"${TAG_IMAGE}" \
/bin/bash -lc "$script"
if [ -d dist ]; then
mv -f dist "dist-pypi-${TARGETARCH}"
fi
export CIBW_ARCHS='x86_64 aarch64'
fi
NO_VERSION_LABEL=ON CIBW_BUILD='cp38-*' cibuildwheel .
ARG TARGETARCH
FROM pytorch/manylinux2_28-builder:cuda12.1 AS builder_amd64
ENV CUDA_VERSION=12.1 \
AUDITWHEEL_PLAT=manylinux_2_28_x86_64
RUN pip3 install uv
FROM quay.io/pypa/manylinux2014_x86_64 AS builder_amd64
RUN yum-config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
ARG CUDA_VERSION=12.1
ENV CUDA_VERSION=${CUDA_VERSION}
FROM quay.io/pypa/manylinux_2_28_aarch64 AS builder_arm64
FROM pytorch/manylinuxaarch64-builder:cuda12.8 AS builder_arm64
ENV CUDA_VERSION=12.8 \
AUDITWHEEL_PLAT=manylinux_2_28_aarch64
RUN /opt/python/cp312-cp312/bin/pip install uv
RUN dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
ARG CUDA_VERSION=12.8
ENV CUDA_VERSION=${CUDA_VERSION}
ARG TARGETARCH
FROM builder_${TARGETARCH}
ENV DEBIAN_FRONTEND=noninteractive \
......@@ -19,12 +23,7 @@ ENV PATH="/usr/local/cuda/bin:${PATH}"
ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
RUN set -eux; \
uv venv -p 3.12 --seed /venv; \
pipx install cibuildwheel; \
git config --global --add safe.directory '/tilelang'
ENV PATH="/venv/bin:$PATH" \
VIRTUAL_ENV=/venv
RUN uv pip install build wheel
WORKDIR /tilelang
......@@ -59,11 +59,14 @@ metadata.version.provider = "version_provider"
metadata.version.provider-path = "."
experimental = true
# build.verbose = true
# logging.level = "DEBUG"
[tool.scikit-build.sdist]
# See MANIFEST.in for details
include = [
"VERSION",
"LICENSE",
"./VERSION",
".git_commit.txt",
"./LICENSE",
"THIRDPARTYNOTICES.txt",
"version_provider.py",
"requirements*.txt",
......@@ -71,7 +74,15 @@ include = [
"CMakeLists.txt",
"src/**",
"cmake/**",
"3rdparty/**",
# The vendored 3rdparty contents in sdist should be same as wheel.
# Need full TVM to build from source.
"3rdparty/tvm",
# CUTLASS
"3rdparty/cutlass/include",
"3rdparty/cutlass/tools",
# Composable Kernel
"3rdparty/composable_kernel/include",
"3rdparty/composable_kernel/library",
"testing/**",
"examples/**",
]
......@@ -80,8 +91,7 @@ exclude = [
".github",
"**/.git",
"**/.github",
"3rdparty/clang**",
"3rdparty/llvm**",
"3rdparty/**",
"build",
]
......@@ -90,7 +100,7 @@ tilelang = "tilelang"
"tilelang/src" = "src"
# NOTE: The mapping below places the contents of '3rdparty' inside 'tilelang/3rdparty' in the wheel.
# This is necessary to find TVM shared libraries at runtime.
# Restrict 3rdparty contents in wheel to the same allowlist as sdist
# The vendored 3rdparty contents in wheel should be same as sdist.
# TVM
"tilelang/3rdparty/tvm/src" = "3rdparty/tvm/src"
"tilelang/3rdparty/tvm/python" = "3rdparty/tvm/python"
......@@ -202,6 +212,7 @@ environment.PYTHONUNBUFFERED = "1"
environment.PATH = "/usr/local/cuda/bin:$PATH"
environment.LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
# Pin to glibc 2.17 for x86 and 2.28 for aarch64 for now
# TODO: upgrade to manylinux_2_28 at some time
manylinux-x86_64-image = "manylinux2014" # CentOS 7
manylinux-aarch64-image = "manylinux_2_28" # AlmaLinux 8
# Install CUDA runtime and stub driver library
......@@ -214,9 +225,11 @@ uname -a
case "$(uname -m)" in
"x86_64")
DEFAULT_CUDA_VERSION="12.1"
yum-config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
;;
"aarch64")
DEFAULT_CUDA_VERSION="12.8"
dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
;;
*)
......@@ -224,7 +237,7 @@ case "$(uname -m)" in
;;
esac
cudaver="$(echo "${CUDA_VERSION:-"12.4"}" | cut -d '.' -f-2)"
cudaver="$(echo "${CUDA_VERSION:-$DEFAULT_CUDA_VERSION}" | cut -d '.' -f-2)"
v="${cudaver//./-}"
yum install -y "cuda-minimal-build-${v}" "cuda-driver-devel-${v}" "cuda-nvrtc-devel-${v}" nvidia-driver-cuda-libs
"""
......
......@@ -4,10 +4,17 @@ import os
import platform
import subprocess
from pathlib import Path
from functools import lru_cache
ROOT = Path(__file__).parent
base_version = (ROOT / 'VERSION').read_text().strip()
# When installing a sdist,
# the installed version needs to match the sdist version,
# so pip will complain when we install `tilelang-0.1.6.post2+gitxxxx.tar.gz`.
# To workaround that, when building sdist,
# we do not add version label and use a file to store the git hash instead.
git_pin = ROOT / '.git_commit.txt'
def _read_cmake_bool(i: str | None, default=False):
......@@ -16,6 +23,7 @@ def _read_cmake_bool(i: str | None, default=False):
return i.lower() not in ('0', 'false', 'off', 'no', 'n', '')
@lru_cache(maxsize=1)
def get_git_commit_id() -> str | None:
"""Get the current git commit hash by running git in the current file's directory."""
......@@ -24,9 +32,13 @@ def get_git_commit_id() -> str | None:
capture_output=True,
encoding='utf-8')
if r.returncode == 0:
return r.stdout.strip()
_git = r.stdout.strip()
git_pin.write_text(_git)
return _git
elif git_pin.exists():
return git_pin.read_text().strip()
else:
return 'unknown'
return None
def dynamic_metadata(
......@@ -37,6 +49,9 @@ def dynamic_metadata(
version = base_version
# generate git version for sdist
get_git_commit_id()
if not _read_cmake_bool(os.environ.get('NO_VERSION_LABEL')):
exts = []
backend = None
......@@ -66,6 +81,8 @@ def dynamic_metadata(
pass
elif git_hash := get_git_commit_id():
exts.append(f'git{git_hash[:8]}')
else:
exts.append('gitunknown')
if exts:
version += '+' + '.'.join(exts)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment