Unverified Commit d89ba5b8 authored by Yichen Yan, committed by GitHub

[Build] Migrate to scikit-build-core (#939)



* cleanup

* init

* build first wheel that may not work

* build cython ext

* fix tvm build

* use sabi

* update rpath to support auditwheel

* pass editable build

* update ci

* fix warnings

* do not use ccache in self host runner

* test local uv cache

* test pip index

* update lib search to respect new lib location

* fix

* update ci

* enable cuda by default

* update src map

* fix

* fix

* fix

* Generate version with backend and git information at build time

* copy tvm_cython to wheels

* fix tvm lib search

* fmt

* remove unused

* auto detect ccache

* add back backend-related files

* remove jit cython adaptor to simplify code

* fmt

* fix ci

* ci fix 2

* ci fix 3

* workaround metal

* ci fix 4

* fmt

* fmt

* Revert "ci fix 4"

This reverts commit d1de8291c3e40927955f3ad3cf87a75c78813676.

* tmp

* fix metal

* trivial cleanup

* add detailed build-time version for cuda

* add back mlc

* Restore wheel info and other trivial updates

* update

* fix cuda

* upd

* fix metal ci

* test for ga build

* test for nvidia/cuda

* test ubuntu 20

* fix

* fix

* Do not use `uv build`

* fix

* fix

* log toolchain version

* merge wheel

* update

* debug

* fix

* update

* skip rocm

* update artifacts each

* fix

* fix

* add mac

* fix cache

* fix cache

* fix cache

* reset and add comment

* upd

* fix git version

* update deps

* trivial update

* use in-tree build dir and install to src to speedup editable build

* Revert "use in-tree build dir and install to src to speedup editable build"

This reverts commit 6ab87b05c5eed811210136b8dca4fc3677dd51f2.

* add build-dir

* update docs

* remove old scripts

* [1/n] cleanup scripts

* [Lint]: [pre-commit.ci] auto fixes [...]

* fix and update

* wait for tvm fix

* revert some tmp fix

* fix

* fix

* spell

* doc update

* test cibuildwheel

* fix and test macos on ci

* Update .github/workflows/dist.yml
Co-authored-by: Xuehai Pan <XuehaiPan@outlook.com>

* fix

* test ga event

* cleanup

* bump tvm to support api3

* test final version

* add cron

* Update .github/workflows/dist.yml
Co-authored-by: Xuehai Pan <XuehaiPan@outlook.com>

* fix

* test ccache for metal cibuildwheel

* test newer macos

* finish

---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Xuehai Pan <XuehaiPan@outlook.com>
parent bab57f23
@@ -12,49 +12,39 @@ env:
jobs:
format-check:
runs-on: [self-hosted, nvidia]
permissions:
contents: write
env:
UV_INDEX_URL: https://mirrors.bfsu.edu.cn/pypi/web/simple
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
fetch-depth: 0
submodules: recursive
- name: Set up Python
uses: actions/setup-python@v6
- name: Install python via uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: false
cache-local-path: ${{ runner.tool_cache }}/uv
activate-environment: true
python-version: ${{ env.PYTHON_VERSION }}
- name: Ensure venv (local & persistent)
run: |
set -e
REQS_HASH=$(sha256sum requirements-test.txt 2>/dev/null | awk '{print $1}' || echo "no_requirements")
MARKER="${{ runner.tool_cache }}/.venv_marker_${{ env.PYTHON_VERSION }}_${REQS_HASH:0:8}"
if [[ -f "$MARKER" ]] && [[ -f "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate" ]]; then
echo "venv exists and hash matches – reuse it"
else
echo "venv stale or missing – recreating"
rm -rf "${{ runner.tool_cache }}/${{ env.VENV_DIR }}" "$MARKER"
python -m venv "${{ runner.tool_cache }}/${{ env.VENV_DIR }}"
# shellcheck source=/dev/null
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
python -m pip install --upgrade pip --no-user
[[ -f requirements-test.txt ]] && \
PIP_NO_BUILD_ISOLATION=1 pip install -r requirements-test.txt --no-user
pip install flash_attn==2.5.8 --no-user --no-build-isolation
touch "$MARKER"
fi
[[ -f requirements-test.txt ]] && \
uv pip install -r requirements-test.txt --no-build-isolation
uv pip install flash_attn==2.5.8 --no-build-isolation
- name: Run format check
run: |
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
git submodule update --init --recursive
set -ex
mkdir -p build
# run cmake to create the build directory with compile_commands.json
cd build; cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DUSE_CUDA=ON; cd ..
uv pip install cmake
cd build; USE_CUDA=1 cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=ON; cd ..
if ! output=$(./format.sh 2>&1); then
echo "------------------------------------"
echo "message:"
@@ -70,56 +60,41 @@ jobs:
needs: format-check
permissions:
contents: read
env:
UV_INDEX_URL: https://mirrors.bfsu.edu.cn/pypi/web/simple
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
fetch-depth: 0
submodules: recursive
repository: ${{ github.event.pull_request.head.repo.full_name }}
ref: ${{ github.event.pull_request.head.ref }}
- name: Set up Python
uses: actions/setup-python@v6
- name: Install python via uv
uses: astral-sh/setup-uv@v6
with:
enable-cache: false
cache-local-path: ${{ runner.tool_cache }}/uv
activate-environment: true
python-version: ${{ env.PYTHON_VERSION }}
- name: Ensure venv (local & persistent)
- name: Setup venv
run: |
set -e
REQS_HASH=$(cat requirements-test.txt 2>/dev/null || true)
MARKER="${{ runner.tool_cache }}/.venv_marker_${{ env.PYTHON_VERSION }}_${REQS_HASH:0:8}"
if [[ -f "$MARKER" ]] && [[ -f "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate" ]]; then
echo "venv exists and hash matches – reuse it"
else
echo "venv stale or missing – recreating"
rm -rf "${{ runner.tool_cache }}/${{ env.VENV_DIR }}" "$MARKER"
python -m venv "${{ runner.tool_cache }}/${{ env.VENV_DIR }}"
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
python -m pip install --upgrade pip --no-user
[[ -f requirements-test.txt ]] && \
PIP_NO_BUILD_ISOLATION=1 pip install -r requirements-test.txt --no-user
# flash attention usually requires no isolation build
pip install flash_attn==2.5.8 --no-user --no-build-isolation
pip install . --no-user
touch "$MARKER"
fi
[[ -f requirements-test.txt ]] && \
uv pip install -r requirements-test.txt --no-build-isolation
uv pip install flash_attn==2.5.8 --no-build-isolation
- name: Install project (wheel form)
run: |
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
pip install . --no-user -v
uv pip install .
- name: Run examples
run: |
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
cd examples
unset PYTHONPATH
python -m pytest -n 4 **/test*.py -v -r fE --durations=0 --cache-clear
- name: Run tests
run: |
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
cd testing/python
unset PYTHONPATH
python -m pytest -n 4 -v -r fE --durations=0 --cache-clear --timeout=3600
name: Dist
on:
schedule:
# 22:00 UTC is 06:00 China Standard Time (UTC+8)
- cron: '0 22 * * *'
release:
types: [ published ]
env:
PYTHON_VERSION: '3.12'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build-wheels:
strategy:
matrix:
os: [ubuntu-22.04, ubuntu-22.04-arm, macos-16]
include:
- os: ubuntu-22.04
cuda_version: "12.1"
- os: ubuntu-22.04-arm
cuda_version: "12.8"
fail-fast: true
runs-on: ${{ matrix.os }}
env:
CUDA_VERSION: ${{ matrix.cuda_version }}
NO_VERSION_LABEL: ${{ github.event_name != 'release' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
submodules: recursive
- name: ccache
uses: hendrikmuhs/ccache-action@v1.2
if: startsWith(matrix.os, 'macos')
with:
create-symlink: true
key: ${{ github.job }}-${{ matrix.os }}
- name: Build wheels
uses: pypa/cibuildwheel@v3.2
with:
output-dir: wheelhouse
config-file: "{package}/pyproject.toml"
# for now, just list the built wheel files
- name: List wheels
id: ls-whl
run: echo "whl_name=$(ls wheelhouse | head -n1)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v4
with:
name: ${{ steps.ls-whl.outputs.whl_name }}.zip
path: wheelhouse/${{ steps.ls-whl.outputs.whl_name }}
compression-level: 0
@@ -81,12 +81,12 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
- name: Ensure venv (local & persistent)
run: uv pip install -r requirements-test.txt -r requirements-build.txt
run: uv pip install -r requirements-test.txt
- name: Build wheel
run: |
source .venv/bin/activate
uv pip install -v --no-build-isolation .
uv pip install -v .
- name: Run metal test
run: |
@@ -51,9 +51,9 @@ jobs:
- name: Run format check
run: |
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
git submodule update --init --recursive
git submodule update --init --recursive --checkout
mkdir -p build
cd build; cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DUSE_ROCM=ON; cd ..
cd build; USE_ROCM=1 cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=ON; cd ..
if ! output=$(./format.sh 2>&1); then
echo "------------------------------------"
echo "message:"
@@ -73,7 +73,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v5
with:
fetch-depth: 0
fetch-depth: 1
repository: ${{ github.event.pull_request.head.repo.full_name }}
ref: ${{ github.event.pull_request.head.ref }}
@@ -111,6 +111,7 @@ jobs:
run: |
echo "Installing project (wheel form)"
source "${{ runner.tool_cache }}/${{ env.VENV_DIR }}/bin/activate"
git submodule update --init --recursive --checkout --recommend-shallow
USE_ROCM=True pip install . --no-user
- name: Run tests
@@ -2,6 +2,7 @@
*.slo
*.lo
*.o
*.so
*.obj
*.pyc
@@ -18,7 +19,7 @@
debug/
build/
dist/
*dist/
wheelhouse/
__pycache__
nnfusion.tar.gz
@@ -82,18 +83,12 @@ models/frozenmodels/
# .ruff_cache
.ruff_cache
# build sdist
build_sdist/
# exclude debug testing folder
!testing/python/debug
# ignore lib with develop mode
tilelang/lib
# tox
.tox/
# cython
tilelang/jit/adapter/cython/.cycache
Subproject commit 883e96b42ae0df40c2f7194cc932bbcd9d0c5627
Subproject commit 5bf17a34602931e7d7e01cbccf358a21fe972779
# Learn a lot from the MLC-LLM Project
# https: // github.com/mlc-ai/mlc-llm/blob/main/CMakeLists.txt
# https://github.com/mlc-ai/mlc-llm/blob/main/CMakeLists.txt
cmake_minimum_required(VERSION 3.18)
cmake_minimum_required(VERSION 3.26)
project(TILE_LANG C CXX)
option(TILE_LANG_STATIC_STDCPP "Statically link libstdc++ for TileLang libraries" ON)
option(TILE_LANG_INSTALL_STATIC_LIB "Install the static library" ON)
if(TILE_LANG_STATIC_STDCPP)
message(STATUS "Enabling static linking of C++ standard library")
# Note: We'll apply static linking flags selectively to avoid Python extension conflicts
# The flags will be applied per-target below rather than globally
endif()
# Set default build type to Release if not provided
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type")
endif()
# Enable compile command export
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(NOT Python_EXECUTABLE)
execute_process(
COMMAND which python
OUTPUT_VARIABLE Python_EXECUTABLE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(Python_EXECUTABLE "${Python_EXECUTABLE}" CACHE FILEPATH "Path to the Python executable")
endif()
# Define a custom macro for globbing files with conditional CONFIGURE_DEPENDS
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
macro(tilelang_file_glob glob variable)
file(${glob} ${variable} CONFIGURE_DEPENDS ${ARGN})
endmacro()
else()
macro(tilelang_file_glob glob variable)
file(${glob} ${variable} ${ARGN})
endmacro()
endif()
# Handle TVM prebuild path or use default configuration
if(DEFINED TVM_PREBUILD_PATH)
message(STATUS "TVM_PREBUILD_PATH: ${TVM_PREBUILD_PATH}")
if(EXISTS ${TVM_PREBUILD_PATH}/config.cmake)
include(${TVM_PREBUILD_PATH}/config.cmake)
endif()
else()
if(EXISTS ${CMAKE_BINARY_DIR}/config.cmake)
include(${CMAKE_BINARY_DIR}/config.cmake)
elseif(EXISTS ${CMAKE_SOURCE_DIR}/config.cmake)
include(${CMAKE_SOURCE_DIR}/config.cmake)
endif()
# Set default build type to RelWithDebInfo if not provided
if(NOT CMAKE_BUILD_TYPE)
# Set default build type to Release if not provided
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
message(STATUS "Setting default build type to ${CMAKE_BUILD_TYPE}")
endif()
endif()
# include cmake modules
include(CheckCXXCompilerFlag)
# Enable static runtime build if required
if(TILE_LANG_INSTALL_STATIC_LIB)
set(BUILD_STATIC_RUNTIME ON)
endif()
# Enforce CUDA standard
if(USE_CUDA)
set(CMAKE_CUDA_STANDARD 17)
endif()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
# Enforce HIP standard
if(USE_ROCM)
set(CMAKE_HIP_STANDARD 17)
check_cxx_compiler_flag("-std=c++17" SUPPORT_CXX17)
set(CMAKE_CXX_FLAGS "-D__HIP_PLATFORM_AMD__ ${CMAKE_CXX_FLAGS}")
find_program(CCACHE_PROGRAM ccache)
if(CCACHE_PROGRAM)
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "C compiler launcher")
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "CXX compiler launcher")
set(CMAKE_CUDA_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "CUDA compiler launcher")
endif()
# Enforce C++ standard
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
# Configs
set(USE_CUDA OFF)
set(USE_ROCM OFF)
set(USE_METAL OFF)
set(PREBUILD_CYTHON ON)
# Configs end
# Locate TVM prebuild path
if(NOT DEFINED TVM_PREBUILD_PATH)
if(DEFINED ENV{TVM_PREBUILD_PATH})
set(TVM_PREBUILD_PATH "$ENV{TVM_PREBUILD_PATH}")
endif()
endif()
include(cmake/load_tvm.cmake)
# Locate TVM source directory
if(NOT DEFINED TVM_SOURCE_DIR)
if(DEFINED ENV{TVM_SOURCE_DIR})
set(TVM_SOURCE_DIR "$ENV{TVM_SOURCE_DIR}")
elseif(DEFINED TVM_PREBUILD_PATH)
set(TVM_SOURCE_DIR "${TVM_PREBUILD_PATH}/..")
else()
set(TVM_SOURCE_DIR ${PROJECT_SOURCE_DIR}/3rdparty/tvm)
endif()
endif()
# Handle TVM prebuild or build TVM from source
if(DEFINED TVM_PREBUILD_PATH)
message(STATUS "Using prebuilt TVM from ${TVM_PREBUILD_PATH}")
add_library(tvm SHARED IMPORTED)
find_library(TVM_LIBRARY_LOCATION
NAMES tvm
HINTS "${TVM_PREBUILD_PATH}"
)
set_target_properties(tvm PROPERTIES
IMPORTED_LOCATION "${TVM_LIBRARY_LOCATION}"
INTERFACE_INCLUDE_DIRECTORIES "${TVM_PREBUILD_PATH}/../include"
)
add_library(tvm_runtime SHARED IMPORTED)
find_library(TVM_RUNTIME_LIBRARY_LOCATION
NAMES tvm_runtime
HINTS "${TVM_PREBUILD_PATH}"
)
set_target_properties(tvm_runtime PROPERTIES
IMPORTED_LOCATION "${TVM_RUNTIME_LIBRARY_LOCATION}"
INTERFACE_INCLUDE_DIRECTORIES "${TVM_PREBUILD_PATH}/../include"
)
if(EXISTS ${TVM_SOURCE}/cmake/config.cmake)
include(${TVM_SOURCE}/cmake/config.cmake)
else()
message(STATUS "Building TVM from source at ${TVM_SOURCE_DIR}")
add_subdirectory(${TVM_SOURCE_DIR} tvm EXCLUDE_FROM_ALL)
message(FATAL_ERROR "Neither a prebuilt TVM was provided nor the TVM submodule has been checked out.")
endif()
# Include directories for TileLang
set(TILE_LANG_INCLUDES ${TVM_INCLUDES})
# Collect source files
tilelang_file_glob(GLOB TILE_LANG_SRCS
file(GLOB TILE_LANG_SRCS
src/*.cc
src/layout/*.cc
src/transform/*.cc
@@ -145,142 +50,118 @@ tilelang_file_glob(GLOB TILE_LANG_SRCS
src/target/intrin_rule*.cc
)
# Include CUDA source files if CUDA is enabled
if(USE_CUDA)
tilelang_file_glob(GLOB TILE_LANG_CUDA_SRCS
src/runtime/*.cc
src/target/ptx.cc
src/target/codegen_cuda.cc
src/target/rt_mod_cuda.cc
)
list(APPEND TILE_LANG_SRCS ${TILE_LANG_CUDA_SRCS})
endif()
# Include ROCm source files if ROCm is enabled
if(USE_ROCM)
tilelang_file_glob(GLOB TILE_LANG_HIP_SRCS
src/target/codegen_hip.cc
src/target/rt_mod_hip.cc
)
list(APPEND TILE_LANG_SRCS ${TILE_LANG_HIP_SRCS})
# Backend-specific checks and configs
if($ENV{USE_METAL})
set(USE_METAL ON)
elseif(APPLE)
message(STATUS "Enable Metal support by default.")
set(USE_METAL ON)
elseif($ENV{USE_ROCM})
set(USE_ROCM ON)
else()
if($ENV{USE_CUDA})
set(USE_CUDA ON)
elseif(DEFINED ENV{USE_CUDA} AND NOT $ENV{USE_CUDA})
# Build CPU-only when we explicitly disable CUDA
set(USE_CUDA OFF)
else()
message(STATUS "Enable CUDA support by default.")
set(USE_CUDA ON)
endif()
endif()
if(USE_METAL)
tilelang_file_glob(GLOB TILE_LANG_METAL_SRCS
file(GLOB TILE_LANG_METAL_SRCS
src/target/rt_mod_metal.cc
)
list(APPEND TILE_LANG_SRCS ${TILE_LANG_METAL_SRCS})
endif()
message(STATUS "Collected source files: ${TILE_LANG_SRCS}")
# Add TileLang object library
add_library(tilelang_objs OBJECT ${TILE_LANG_SRCS})
message(STATUS "TVM_SOURCE_DIR: ${TVM_SOURCE_DIR}")
# Include directories for TileLang
set(TILE_LANG_INCLUDES
${TVM_SOURCE_DIR}/include
${TVM_SOURCE_DIR}/ffi/include
${TVM_SOURCE_DIR}/src
${TVM_SOURCE_DIR}/3rdparty/dlpack/include
${TVM_SOURCE_DIR}/3rdparty/dmlc-core/include
)
elseif(USE_ROCM)
set(CMAKE_HIP_STANDARD 17)
include(${TVM_SOURCE}/cmake/utils/FindROCM.cmake)
find_rocm($ENV{USE_ROCM})
add_compile_definitions(__HIP_PLATFORM_AMD__ __HIP_PLATFORM_HCC__=1)
# Find CUDA Toolkit
if(USE_CUDA)
file(GLOB TILE_LANG_HIP_SRCS
src/target/codegen_hip.cc
src/target/rt_mod_hip.cc
)
list(APPEND TILE_LANG_SRCS ${TILE_LANG_HIP_SRCS})
list(APPEND TILE_LANG_INCLUDES ${ROCM_INCLUDE_DIRS})
elseif(USE_CUDA)
set(CMAKE_CUDA_STANDARD 17)
find_package(CUDAToolkit REQUIRED)
add_compile_definitions("CUDA_MAJOR_VERSION=${CUDAToolkit_VERSION_MAJOR}")
if(NOT CUDAToolkit_FOUND)
message(FATAL_ERROR "CUDA Toolkit not found. Please set CUDAToolkit_ROOT.")
endif()
# Set `USE_CUDA=/usr/local/cuda-x.y`
cmake_path(GET CUDAToolkit_BIN_DIR PARENT_PATH USE_CUDA)
message(STATUS "CUDA Toolkit includes: ${CUDAToolkit_INCLUDE_DIRS}")
set(CUDA_MAJOR_VERSION ${CUDAToolkit_VERSION_MAJOR})
message(STATUS "Setting CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}")
add_compile_definitions(CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION})
file(GLOB TILE_LANG_CUDA_SRCS
src/runtime/*.cc
src/target/ptx.cc
src/target/codegen_cuda.cc
src/target/rt_mod_cuda.cc
)
list(APPEND TILE_LANG_SRCS ${TILE_LANG_CUDA_SRCS})
list(APPEND TILE_LANG_INCLUDES ${CUDAToolkit_INCLUDE_DIRS})
endif(USE_CUDA)
endif()
# Find ROCM Toolkit
if(USE_ROCM)
find_rocm(${USE_ROCM})
message(STATUS "USE_ROCM: ${USE_ROCM}")
# Include tvm after configs have been populated
add_subdirectory(${TVM_SOURCE} tvm EXCLUDE_FROM_ALL)
if(ROCM_FOUND)
# always set the includedir
# avoid global retrigger of cmake
include_directories(SYSTEM ${ROCM_INCLUDE_DIRS})
add_definitions(-D__HIP_PLATFORM_HCC__=1)
else()
message(FATAL_ERROR "ROCM Toolkit not found. Please set HIP_ROOT.")
endif(ROCM_FOUND)
# Resolve compile warnings in tvm
add_compile_definitions(DMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>)
message(STATUS "ROCM Toolkit includes: ${ROCM_INCLUDE_DIRS}")
list(APPEND TILE_LANG_INCLUDES ${ROCM_INCLUDE_DIRS})
endif(USE_ROCM)
# Define compile-time macros
set(TILE_LANG_COMPILE_DEFS
DMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>
__STDC_FORMAT_MACROS=1
PICOJSON_USE_INT64
)
add_library(tilelang_objs OBJECT ${TILE_LANG_SRCS})
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_definitions(tilelang_objs PRIVATE "TVM_LOG_DEBUG")
endif()
# Set target properties for object library
target_include_directories(tilelang_objs PRIVATE ${TILE_LANG_INCLUDES})
target_compile_definitions(tilelang_objs PRIVATE ${TILE_LANG_COMPILE_DEFS})
target_compile_definitions(tilelang_objs PRIVATE -DTILE_LANG_EXPORTS)
# Shared library
add_library(tilelang SHARED $<TARGET_OBJECTS:tilelang_objs>)
add_library(tilelang_module SHARED $<TARGET_OBJECTS:tilelang_objs>)
target_link_libraries(tilelang PUBLIC tvm_runtime)
if(USE_METAL)
target_link_libraries(tilelang_module PUBLIC tvm)
if(APPLE)
# FIXME: libtilelang should only link against tvm runtime
target_link_libraries(tilelang PUBLIC tvm)
endif()
# Build cython extension
find_package(Python REQUIRED COMPONENTS Interpreter Development.Module ${SKBUILD_SABI_COMPONENT})
# Static library
add_library(tilelang_static STATIC $<TARGET_OBJECTS:tilelang_objs>)
add_dependencies(tilelang_static tvm_runtime)
set_target_properties(tilelang_static PROPERTIES OUTPUT_NAME tilelang)
add_custom_command(
OUTPUT "${CMAKE_BINARY_DIR}/cython_wrapper.cpp"
COMMENT
"Cythoning tilelang/jit/adapter/cython/cython_wrapper.pyx"
COMMAND Python::Interpreter -m cython
"${CMAKE_CURRENT_SOURCE_DIR}/tilelang/jit/adapter/cython/cython_wrapper.pyx"
--cplus --output-file "${CMAKE_BINARY_DIR}/cython_wrapper.cpp"
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/tilelang/jit/adapter/cython/cython_wrapper.pyx"
VERBATIM)
# Apply static linking flags only to static library to avoid Python extension conflicts
if(TILE_LANG_STATIC_STDCPP AND CMAKE_CXX_COMPILER_ID MATCHES "GNU")
target_link_options(tilelang_static PRIVATE -static-libstdc++ -static-libgcc)
if(NOT "${SKBUILD_SABI_VERSION}" STREQUAL "")
set(USE_SABI USE_SABI ${SKBUILD_SABI_VERSION})
endif()
# Debug build type-specific definitions
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_definitions(tilelang PRIVATE "TVM_LOG_DEBUG")
target_compile_definitions(tilelang_objs PRIVATE "TVM_LOG_DEBUG")
target_compile_definitions(tilelang_static PRIVATE "TVM_LOG_DEBUG")
endif()
python_add_library(cython_wrapper MODULE "${CMAKE_BINARY_DIR}/cython_wrapper.cpp" ${USE_SABI} WITH_SOABI)
# Install to site dir to support direct import
install(TARGETS cython_wrapper LIBRARY DESTINATION .)
# Building tvm_cython modules
if(NOT DEFINED TVM_PREBUILD_PATH)
add_dependencies(tilelang tvm_cython)
# let libtilelang search for tvm/tvm_runtime in the same dir
if(APPLE)
set_target_properties(tilelang PROPERTIES INSTALL_RPATH "@loader_path")
set_target_properties(tilelang_module PROPERTIES INSTALL_RPATH "@loader_path")
else()
set_target_properties(tilelang PROPERTIES INSTALL_RPATH "\$ORIGIN")
set_target_properties(tilelang_module PROPERTIES INSTALL_RPATH "\$ORIGIN")
endif()
# Module shared library
add_library(tilelang_module SHARED $<TARGET_OBJECTS:tilelang_objs>)
target_link_libraries(tilelang_module PUBLIC tvm)
install(TARGETS tvm tvm_runtime tilelang_module tilelang LIBRARY DESTINATION tilelang/lib)
# Install targets
if(TILE_LANG_INSTALL_STATIC_LIB)
install(TARGETS tilelang_static tvm_runtime
LIBRARY DESTINATION lib${LIB_SUFFIX}
)
else()
if(DEFINED TVM_PREBUILD_PATH)
install(TARGETS tilelang tilelang_module
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib${LIB_SUFFIX}
)
else()
install(TARGETS tvm_runtime tilelang tilelang_module
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib${LIB_SUFFIX}
)
endif()
# Copy tvm cython ext for wheels
# TODO: not necessary for editable builds
if(TVM_BUILD_FROM_SOURCE)
add_dependencies(tilelang tvm_cython)
install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/tvm/python/tvm/ffi/core.abi3.so" DESTINATION tilelang/3rdparty/tvm/python/tvm/ffi/)
endif()
# todo: support prebuilt tvm
set(TVM_BUILD_FROM_SOURCE TRUE)
set(TVM_SOURCE ${CMAKE_SOURCE_DIR}/3rdparty/tvm)
if(DEFINED $ENV{TVM_ROOT})
if(EXISTS $ENV{TVM_ROOT}/cmake/config.cmake)
set(TVM_SOURCE $ENV{TVM_ROOT})
endif()
endif()
set(TVM_INCLUDES
${TVM_SOURCE}/include
${TVM_SOURCE}/ffi/include
${TVM_SOURCE}/src
${TVM_SOURCE}/3rdparty/dlpack/include
${TVM_SOURCE}/3rdparty/dmlc-core/include
)
@@ -4,9 +4,9 @@
**Prerequisites for installation via wheel or PyPI:**
- **Operating System**: Ubuntu 20.04 or later
- **glibc**: 2.28 (Ubuntu 20.04 or later)
- **Python Version**: >= 3.8
- **CUDA Version**: >= 11.0
- **CUDA Version**: 12.0 <= CUDA < 13
The easiest way to install **tile-lang** is directly from PyPI using pip. To install the latest version, run the following command in your terminal:
@@ -37,14 +37,11 @@ python -c "import tilelang; print(tilelang.__version__)"
**Prerequisites for building from source:**
- **Operating System**: Linux
- **Python Version**: >= 3.7
- **Python Version**: >= 3.8
- **CUDA Version**: >= 10.0
- **LLVM**: < 20 if you are using the bundled TVM submodule
We recommend using a Docker container with the necessary dependencies to build **tile-lang** from source. You can use the following command to run a Docker container with the required dependencies:
```bash
docker run --gpus all -it --rm --ipc=host nvcr.io/nvidia/pytorch:23.01-py3
docker run -it --rm --ipc=host nvcr.io/nvidia/pytorch:23.01-py3
```
To build and install **tile-lang** directly from source, follow these steps. This process requires certain pre-requisites from Apache TVM, which can be installed on Ubuntu/Debian-based systems using the following commands:
@@ -59,21 +56,20 @@ After installing the prerequisites, you can clone the **tile-lang** repository a
```bash
git clone --recursive https://github.com/tile-ai/tilelang.git
cd tilelang
pip install . # Please be patient, this may take some time.
pip install . -v
```
If you want to install **tile-lang** in development mode, you can run the following command:
```bash
pip install -e .
pip install -e . -v
```
We currently provide three methods to install **tile-lang**:
1. [Install Using Docker](#install-method-1) (Recommended)
2. [Install from Source (using your own TVM installation)](#install-method-2)
3. [Install from Source (using the bundled TVM submodule)](#install-method-3)
4. [Install Using the Provided Script](#install-method-4)
2. [Install from Source (using the bundled TVM submodule)](#install-method-2)
3. [Install from Source (using your own TVM installation)](#install-method-3)
(install-method-1)=
@@ -83,8 +79,7 @@ For users who prefer a containerized environment with all dependencies pre-confi
**Prerequisites:**
- Docker installed on your system
- NVIDIA Docker runtime (nvidia-docker2) for GPU support
- Compatible NVIDIA GPU (e.g., B200, H100, etc.)
- An NVIDIA Docker runtime or GPU is not required to build tilelang; you can build the image on a host without a GPU and use it on another machine.
1. **Clone the Repository**:
@@ -156,7 +151,7 @@ This Docker-based installation method provides a complete, isolated environment
(install-method-2)=
### Method 2: Install from Source (Using Your Own TVM Installation)
### Method 2: Install from Source (Using the Bundled TVM Submodule)
If you prefer to use the bundled TVM submodule, follow these steps:
@@ -174,25 +169,12 @@ cd tilelang
Create a build directory and specify your existing TVM path:
```bash
mkdir build
cd build
cmake .. -DTVM_PREBUILD_PATH=/your/path/to/tvm/build # e.g., /workspace/tvm/build
make -j 16
```
3. **Set Environment Variables**:
Update `PYTHONPATH` to include the `tile-lang` Python module:
```bash
export PYTHONPATH=/your/path/to/tilelang/:$PYTHONPATH
# TVM_IMPORT_PYTHON_PATH is used by 3rd-party frameworks to import TVM
export TVM_IMPORT_PYTHON_PATH=/your/path/to/tvm/python
pip install . -v
```
(install-method-3)=
### Method 3: Install from Source (Using the Bundled TVM Submodule)
### Method 3: Install from Source (Using Your Own TVM Installation)
If you already have a compatible TVM installation, follow these instructions:
@@ -210,53 +192,62 @@ cd tilelang
Copy the configuration file and enable the desired backends (e.g., LLVM and CUDA):
```bash
mkdir build
cp 3rdparty/tvm/cmake/config.cmake build
cd build
# echo "set(USE_LLVM ON)" # set USE_LLVM to ON if using LLVM
echo "set(USE_CUDA ON)" >> config.cmake
# or echo "set(USE_ROCM ON)" >> config.cmake to enable ROCm runtime
cmake ..
make -j 16
TVM_ROOT=<your-tvm-repo> pip install . -v
```
The build outputs (e.g., `libtilelang.so`, `libtvm.so`, `libtvm_runtime.so`) will be generated in the `build` directory.
3. **Set Environment Variables**:
## Install with Nightly Version
Ensure the `tile-lang` Python package is in your `PYTHONPATH`:
For users who want access to the latest features and improvements before official releases, we provide nightly builds of **tile-lang**.
```bash
export PYTHONPATH=/your/path/to/tilelang/:$PYTHONPATH
pip install tilelang -f https://tile-ai.github.io/whl/nightly/cu121/
# or pip install tilelang --find-links https://tile-ai.github.io/whl/nightly/cu121/
```
(install-method-4)=
> **Note:** Nightly builds contain the most recent code changes but may be less stable than official releases. They're ideal for testing new features or if you need a specific bugfix that hasn't been released yet.
### Method 4: Install Using the Provided Script
## Install Configs
For a simplified installation, use the provided script:
tilelang uses ffi/cython/dlpack to interact with PyTorch tensors,
so `--no-build-isolation` and similar configs are not necessary.
1. **Clone the Repository**:
### Build-time environment variables
`USE_CUDA`: Whether to enable CUDA support; default: `ON` on Linux. Set to `OFF` to build a CPU-only version. By default, `/usr/local/cuda` is used when building tilelang; set `CUDAToolkit_ROOT` to use a different CUDA toolkit. A short usage sketch follows at the end of this section.
```bash
git clone --recursive https://github.com/tile-ai/tilelang
cd tilelang
```
`USE_ROCM`: Whether to enable ROCm support; default: `OFF`. If your ROCm SDK is not located in `/opt/rocm`, set `USE_ROCM=<rocm_sdk>` to build ROCm support against a custom SDK path.
2. **Run the Installation Script**:
`USE_METAL`: Whether to enable Metal support; default: `ON` on Darwin.
```bash
bash install_cuda.sh
# or bash `install_amd.sh` if you want to enable ROCm runtime
`TVM_ROOT`: TVM source root to use.
`NO_VERSION_LABEL` and `NO_TOOLCHAIN_VERSION`:
When building tilelang, we try to embed SDK and git information into the package version, as shown below,
where the local version label looks like `<sdk>.git<git_hash>`. Set `NO_VERSION_LABEL=ON` to disable this behavior.
```
$ python -mbuild -w
...
Successfully built tilelang-0.1.6.post1+cu116.git0d4a74be-cp38-abi3-linux_x86_64.whl
```
## Install with Nightly Version
where `<sdk>` is one of `{cuda,rocm,metal}`. Specifically, when `<sdk>=cuda` and `CUDA_VERSION` is provided via the environment,
the label becomes `cu<cuda_major><cuda_minor>`, similar to how PyTorch labels its wheels.
Set `NO_TOOLCHAIN_VERSION=ON` to disable this toolchain-version suffix.
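A minimal usage sketch for these build-time variables (the ROCm path below is a placeholder; adjust it to your installation):

```bash
# CPU-only build: explicitly disable CUDA
USE_CUDA=OFF pip install . -v

# ROCm build against a custom SDK location (placeholder path)
USE_ROCM=/opt/rocm-6.0 pip install . -v

# Build a wheel without the local version label (e.g. before uploading to an index)
NO_VERSION_LABEL=ON python -m build --wheel
```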
For users who want access to the latest features and improvements before official releases, we provide nightly builds of **tile-lang**.
### Run-time environment variables
```bash
pip install tilelang -f https://tile-ai.github.io/whl/nightly/cu121/
# or pip install tilelang --find-links https://tile-ai.github.io/whl/nightly/cu121/
```
<!-- TODO: tvm -->
> **Note:** Nightly builds contain the most recent code changes but may be less stable than official releases. They're ideal for testing new features or if you need a specific bugfix that hasn't been released yet.
## IDE Configs
Building tilelang locally will automatically generate a `compile_commands.json` file in the `build` directory.
VS Code with clangd and the [clangd extension](https://marketplace.visualstudio.com/items?itemName=llvm-vs-code-extensions.vscode-clangd) should be able to index it without extra configuration.
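If your clangd version does not discover `build/compile_commands.json` on its own, one option is a project-local `.clangd` file that points at the build directory. This is only a sketch, not a file the repository ships:

```bash
# CompilationDatabase is a standard clangd config key; "build" matches the
# in-tree build directory used above.
cat > .clangd <<'EOF'
CompileFlags:
  CompilationDatabase: build
EOF
```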
## Compile cache
`ccache` will be automatically used if found.
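For example, `find_program(CCACHE_PROGRAM ccache)` in the top-level CMakeLists.txt wires ccache in as the compiler launcher whenever it is on `PATH`; you can confirm the cache is actually being hit after a build:

```bash
# Verify ccache is available (CMake picks it up automatically) and
# inspect hit/miss statistics after a build
command -v ccache
ccache -s
```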
## Repairing wheels
If you plan to use your wheel in another environment,
it is recommended to repair it with auditwheel (on Linux) or delocate (on macOS).
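A sketch of the repair step, assuming the wheel was built into `dist/` (the CUDA exclude mirrors the one used by the wheel scripts in this change):

```bash
# Linux: bundle external shared libraries into the wheel, keeping the CUDA
# driver library external
auditwheel repair -w wheelhouse --exclude libcuda.so.1 dist/*.whl

# macOS: the delocate equivalent
delocate-wheel -w wheelhouse -v dist/*.whl
```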
echo "Starting installation script..."
# Step 1: Install Python requirements
echo "Installing Python requirements from requirements.txt..."
pip install -r requirements-build.txt
pip install -r requirements.txt
if [ $? -ne 0 ]; then
echo "Error: Failed to install Python requirements."
exit 1
else
echo "Python requirements installed successfully."
fi
# Step 2: Define LLVM version and architecture
LLVM_VERSION="10.0.1"
IS_AARCH64=false
EXTRACT_PATH="3rdparty"
echo "LLVM version set to ${LLVM_VERSION}."
echo "Is AARCH64 architecture: $IS_AARCH64"
# Step 3: Determine the correct Ubuntu version based on LLVM version
UBUNTU_VERSION="16.04"
if [[ "$LLVM_VERSION" > "17.0.0" ]]; then
UBUNTU_VERSION="22.04"
elif [[ "$LLVM_VERSION" > "16.0.0" ]]; then
UBUNTU_VERSION="20.04"
elif [[ "$LLVM_VERSION" > "13.0.0" ]]; then
UBUNTU_VERSION="18.04"
fi
echo "Ubuntu version for LLVM set to ${UBUNTU_VERSION}."
# Step 4: Set download URL and file name for LLVM
BASE_URL="https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VERSION}"
if $IS_AARCH64; then
FILE_NAME="clang+llvm-${LLVM_VERSION}-aarch64-linux-gnu.tar.xz"
else
FILE_NAME="clang+llvm-${LLVM_VERSION}-x86_64-linux-gnu-ubuntu-${UBUNTU_VERSION}.tar.xz"
fi
DOWNLOAD_URL="${BASE_URL}/${FILE_NAME}"
echo "Download URL for LLVM: ${DOWNLOAD_URL}"
# Step 5: Create extraction directory
echo "Creating extraction directory at ${EXTRACT_PATH}..."
mkdir -p "$EXTRACT_PATH"
if [ $? -ne 0 ]; then
echo "Error: Failed to create extraction directory."
exit 1
else
echo "Extraction directory created successfully."
fi
# Step 6: Download LLVM
echo "Downloading $FILE_NAME from $DOWNLOAD_URL..."
curl -L -o "${EXTRACT_PATH}/${FILE_NAME}" "$DOWNLOAD_URL"
if [ $? -ne 0 ]; then
echo "Error: Download failed!"
exit 1
else
echo "Download completed successfully."
fi
# Step 7: Extract LLVM
echo "Extracting $FILE_NAME to $EXTRACT_PATH..."
tar -xJf "${EXTRACT_PATH}/${FILE_NAME}" -C "$EXTRACT_PATH"
if [ $? -ne 0 ]; then
echo "Error: Extraction failed!"
exit 1
else
echo "Extraction completed successfully."
fi
# Step 8: Determine LLVM config path
LLVM_CONFIG_PATH="$(realpath ${EXTRACT_PATH}/$(basename ${FILE_NAME} .tar.xz)/bin/llvm-config)"
echo "LLVM config path determined as: $LLVM_CONFIG_PATH"
# Step 9: Clone and build TVM
echo "Cloning TVM repository and initializing submodules..."
# clone and build tvm
git submodule update --init --recursive
if [ -d build ]; then
rm -rf build
fi
mkdir build
cp 3rdparty/tvm/cmake/config.cmake build
cd build
echo "Configuring TVM build with LLVM and CUDA paths..."
echo "set(USE_LLVM $LLVM_CONFIG_PATH)" >> config.cmake
echo "Running CMake for TileLang..."
cmake ..
if [ $? -ne 0 ]; then
echo "Error: CMake configuration failed."
exit 1
fi
echo "Building TileLang with make..."
make -j
if [ $? -ne 0 ]; then
echo "Error: TileLang build failed."
exit 1
else
echo "TileLang build completed successfully."
fi
cd ..
# Step 11: Set environment variables
TILELANG_PATH="$(pwd)"
echo "Configuring environment variables for TVM..."
echo "export PYTHONPATH=${TILELANG_PATH}:\$PYTHONPATH" >> ~/.bashrc
# Step 12: Source .bashrc to apply changes
echo "Applying environment changes by sourcing .bashrc..."
source ~/.bashrc
if [ $? -ne 0 ]; then
echo "Error: Failed to source .bashrc."
exit 1
else
echo "Environment configured successfully."
fi
echo "Installation script completed successfully."
# Add command line option parsing
USE_LLVM=false
while [[ $# -gt 0 ]]; do
case $1 in
--enable-llvm)
USE_LLVM=true
shift
;;
*)
echo "Unknown option: $1"
echo "Usage: $0 [--enable-llvm]"
exit 1
;;
esac
done
echo "Starting installation script..."
echo "LLVM enabled: $USE_LLVM"
# Step 1: Install Python requirements
echo "Installing Python requirements from requirements.txt..."
pip install -r requirements-build.txt
pip install -r requirements.txt
if [ $? -ne 0 ]; then
echo "Error: Failed to install Python requirements."
exit 1
else
echo "Python requirements installed successfully."
fi
# Step 2: Define LLVM version and architecture
if $USE_LLVM; then
LLVM_VERSION="10.0.1"
IS_AARCH64=false
EXTRACT_PATH="3rdparty"
echo "LLVM version set to ${LLVM_VERSION}."
echo "Is AARCH64 architecture: $IS_AARCH64"
# Step 3: Determine the correct Ubuntu version based on LLVM version
UBUNTU_VERSION="16.04"
if [[ "$LLVM_VERSION" > "17.0.0" ]]; then
UBUNTU_VERSION="22.04"
elif [[ "$LLVM_VERSION" > "16.0.0" ]]; then
UBUNTU_VERSION="20.04"
elif [[ "$LLVM_VERSION" > "13.0.0" ]]; then
UBUNTU_VERSION="18.04"
fi
echo "Ubuntu version for LLVM set to ${UBUNTU_VERSION}."
# Step 4: Set download URL and file name for LLVM
BASE_URL="https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VERSION}"
if $IS_AARCH64; then
FILE_NAME="clang+llvm-${LLVM_VERSION}-aarch64-linux-gnu.tar.xz"
else
FILE_NAME="clang+llvm-${LLVM_VERSION}-x86_64-linux-gnu-ubuntu-${UBUNTU_VERSION}.tar.xz"
fi
DOWNLOAD_URL="${BASE_URL}/${FILE_NAME}"
echo "Download URL for LLVM: ${DOWNLOAD_URL}"
# Step 5: Create extraction directory
echo "Creating extraction directory at ${EXTRACT_PATH}..."
mkdir -p "$EXTRACT_PATH"
if [ $? -ne 0 ]; then
echo "Error: Failed to create extraction directory."
exit 1
else
echo "Extraction directory created successfully."
fi
# Step 6: Download LLVM
echo "Downloading $FILE_NAME from $DOWNLOAD_URL..."
curl -L -o "${EXTRACT_PATH}/${FILE_NAME}" "$DOWNLOAD_URL"
if [ $? -ne 0 ]; then
echo "Error: Download failed!"
exit 1
else
echo "Download completed successfully."
fi
# Step 7: Extract LLVM
echo "Extracting $FILE_NAME to $EXTRACT_PATH..."
tar -xJf "${EXTRACT_PATH}/${FILE_NAME}" -C "$EXTRACT_PATH"
if [ $? -ne 0 ]; then
echo "Error: Extraction failed!"
exit 1
else
echo "Extraction completed successfully."
fi
# Step 8: Determine LLVM config path
LLVM_CONFIG_PATH="$(realpath ${EXTRACT_PATH}/$(basename ${FILE_NAME} .tar.xz)/bin/llvm-config)"
echo "LLVM config path determined as: $LLVM_CONFIG_PATH"
fi
# Step 9: Clone and build TVM
echo "Cloning TVM repository and initializing submodules..."
# clone and build tvm
git submodule update --init --recursive
if [ -d build ]; then
rm -rf build
fi
mkdir build
cp 3rdparty/tvm/cmake/config.cmake build
cd build
echo "Configuring TVM build with CUDA paths..."
if $USE_LLVM; then
echo "set(USE_LLVM \"$LLVM_CONFIG_PATH\")" >> config.cmake
fi
CUDA_HOME=$(python -c "import sys; sys.path.append('../tilelang'); from env import CUDA_HOME; print(CUDA_HOME)") || \
{ echo "ERROR: Failed to retrieve CUDA_HOME via Python script." >&2; exit 1; } && \
{ [ -n "$CUDA_HOME" ] || { echo "ERROR: CUDA_HOME is empty, check CUDA installation or _find_cuda_home() in setup.py" >&2; exit 1; }; } && \
echo "set(USE_CUDA \"$CUDA_HOME\")" >> config.cmake
echo "Running CMake for TileLang..."
cmake ..
if [ $? -ne 0 ]; then
echo "Error: CMake configuration failed."
exit 1
fi
echo "Building TileLang with make..."
# Calculate 75% of available CPU cores
# Otherwise, make will use all available cores
# and it may cause the system to be unresponsive
CORES=$(nproc)
MAKE_JOBS=$(( CORES * 75 / 100 ))
make -j${MAKE_JOBS}
if [ $? -ne 0 ]; then
echo "Error: TileLang build failed."
exit 1
else
echo "TileLang build completed successfully."
fi
cd ..
# Step 11: Set environment variables
TILELANG_PATH="$(pwd)"
echo "TileLang path set to: $TILELANG_PATH"
echo "Configuring environment variables for TVM..."
echo "export PYTHONPATH=${TILELANG_PATH}:\$PYTHONPATH" >> ~/.bashrc
# Step 12: Source .bashrc to apply changes
echo "Applying environment changes by sourcing .bashrc..."
source ~/.bashrc
if [ $? -ne 0 ]; then
echo "Error: Failed to source .bashrc."
exit 1
else
echo "Environment configured successfully."
fi
echo "Installation script completed successfully."
#!/bin/bash
set -eux
git submodule update --init --recursive
rm -rf build
mkdir build
cp 3rdparty/tvm/cmake/config.cmake build
cd build
echo "set(USE_METAL ON)" >> config.cmake
CMAKE_C_COMPILER_LAUNCHER=ccache CMAKE_CXX_COMPILER_LAUNCHER=ccache cmake ..
CORES=$(sysctl -n hw.logicalcpu)
MAKE_JOBS=$(( CORES / 2 ))
make -j${MAKE_JOBS}
echo "Starting installation script..."
# install requirements
pip install -r requirements-build.txt
pip install -r requirements.txt
if [ $? -ne 0 ]; then
echo "Error: Failed to install Python requirements."
exit 1
else
echo "Python requirements installed successfully."
fi
# determine if root
USER_IS_ROOT=false
if [ "$EUID" -eq 0 ]; then
USER_IS_ROOT=true
fi
if $USER_IS_ROOT; then
# Fetch the GPG key for the LLVM repository and add it to the trusted keys
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
# Check if the repository is already present in the sources.list
if ! grep -q "http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" /etc/apt/sources.list; then
# Add the LLVM repository to sources.list
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" >> /etc/apt/sources.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" >> /etc/apt/sources.list
else
# Print a message if the repository is already added
echo "The repository is already added."
fi
# Update package lists and install llvm-16
apt-get update
apt-get install -y llvm-16
else
# Fetch the GPG key for the LLVM repository and add it to the trusted keys using sudo
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
# Check if the repository is already present in the sources.list
if ! grep -q "http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" /etc/apt/sources.list; then
# Add the LLVM repository to sources.list using sudo
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" | sudo tee -a /etc/apt/sources.list
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main" | sudo tee -a /etc/apt/sources.list
else
# Print a message if the repository is already added
echo "The repository is already added."
fi
# Update package lists and install llvm-16 using sudo
sudo apt-get update
sudo apt-get install -y llvm-16
fi
# Step 9: Clone and build TVM
echo "Cloning TVM repository and initializing submodules..."
# clone and build tvm
git submodule update --init --recursive
if [ -d build ]; then
rm -rf build
fi
mkdir build
cp 3rdparty/tvm/cmake/config.cmake build
cd build
echo "Configuring TVM build with LLVM and CUDA paths..."
echo "set(USE_LLVM llvm-config-16)" >> config.cmake && echo "set(USE_ROCM /opt/rocm)" >> config.cmake
echo "Running CMake for TileLang..."
cmake ..
if [ $? -ne 0 ]; then
echo "Error: CMake configuration failed."
exit 1
fi
echo "Building TileLang with make..."
make -j
if [ $? -ne 0 ]; then
echo "Error: TileLang build failed."
exit 1
else
echo "TileLang build completed successfully."
fi
cd ..
# Define the lines to be added
TILELANG_PATH="$(pwd)"
echo "Configuring environment variables for TVM..."
echo "export PYTHONPATH=${TILELANG_PATH}:\$PYTHONPATH" >> ~/.bashrc
TVM_HOME_ENV="export TVM_HOME=${TILELANG_PATH}/3rdparty/tvm"
TILELANG_PYPATH_ENV="export PYTHONPATH=\$TVM_HOME/python:${TILELANG_PATH}:\$PYTHONPATH"
# Check and add the first line if not already present
if ! grep -qxF "$TVM_HOME_ENV" ~/.bashrc; then
echo "$TVM_HOME_ENV" >> ~/.bashrc
echo "Added TVM_HOME to ~/.bashrc"
else
echo "TVM_HOME is already set in ~/.bashrc"
fi
# Check and add the second line if not already present
if ! grep -qxF "$TILELANG_PYPATH_ENV" ~/.bashrc; then
echo "$TILELANG_PYPATH_ENV" >> ~/.bashrc
echo "Added PYTHONPATH to ~/.bashrc"
else
echo "PYTHONPATH is already set in ~/.bashrc"
fi
# Reload ~/.bashrc to apply the changes
source ~/.bashrc
echo "Installation script completed successfully."
set -eux
# Get the CUDA version from the command line
IMAGE="tilelang-builder:manylinux"
docker build . -f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" --tag ${IMAGE}
install_pip="python3.8 -m pip install --upgrade pip && python3.8 -m pip install -r requirements-build.txt"
tox_command="python3.8 -m tox -e py38,py39,py310,py311,py312"
script="sh maint/scripts/local_distribution.sh"
docker run --rm --gpus all -v $(pwd):/tilelang ${IMAGE} /bin/bash -c "$install_pip && $tox_command"
docker run --rm -v $(pwd):/tilelang ${IMAGE} /bin/bash -c "$script"
set -eux
# Get the CUDA version from the command line
IMAGE="tilelang-builder:manylinux"
docker build . -f "$(dirname "${BASH_SOURCE[0]}")/pypi.manylinux.Dockerfile" --tag ${IMAGE}
install_pip="python3.8 -m pip install --upgrade pip && python3.8 -m pip install -r requirements-build.txt"
tox_command="python3.8 -m tox -e py38-pypi,py39-pypi,py310-pypi,py311-pypi,py312-pypi"
script="sh maint/scripts/pypi_distribution.sh"
docker run --rm --gpus all -v $(pwd):/tilelang ${IMAGE} /bin/bash -c "$install_pip && $tox_command"
docker run --rm -v $(pwd):/tilelang -w /tilelang ${IMAGE} /bin/bash -c "$script"
# if dist and build directories exist, remove them
if [ -d dist ]; then
rm -r dist
fi
set -eux
python -m build --wheel -o dist
rm -rf dist
python setup.py sdist --formats=gztar,zip
python -mpip install -U pip
python -mpip install -U build wheel
if [ $? -ne 0 ]; then
echo "Error: Failed to build the wheel."
exit 1
else
echo "Wheel built successfully."
fi
NO_VERSION_LABEL=1 python -m build --sdist
python -m build --wheel
echo "Wheel built successfully."
multi_python_version=("3.8" "3.9" "3.10" "3.11" "3.12")
for python_version in "${multi_python_version[@]}"; do
echo "Installing Python ${python_version}..."
apt-get install -y python${python_version}
done
pip install -r requirements-build.txt
# if dist and build directories exist, remove them
if [ -d dist ]; then
rm -r dist
fi
# Build source distribution (disabled for now)
# python setup.py sdist --formats=gztar,zip
# Build wheels for different Python versions
echo "Building wheels for multiple Python versions..."
tox -e py38,py39,py310,py311,py312
if [ $? -ne 0 ]; then
echo "Error: Failed to build the wheels."
exit 1
else
echo "Wheels built successfully."
fi
\ No newline at end of file
FROM nvidia/cuda:12.1.0-devel-ubuntu20.04
ENV DEBIAN_FRONTEND=noninteractive \
TZ=Etc/UTC
RUN set -eux; \
apt-get update; \
apt-get install -y software-properties-common; \
add-apt-repository ppa:ubuntu-toolchain-r/test -y; \
apt-get update; \
apt-get install -y wget curl libtinfo-dev zlib1g-dev libssl-dev build-essential \
libedit-dev libxml2-dev git; \
curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh; \
bash Miniconda3-latest-Linux-x86_64.sh -b -p /miniconda3; \
rm Miniconda3-latest-Linux-x86_64.sh;
RUN apt-get update && apt-get install -y ninja-build
ENV PATH=/miniconda3/bin/:$PATH
# ✅ Accept Anaconda Terms of Service for both required channels
RUN conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main; \
conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r
# Create environments
RUN set -eux; \
conda create -n py38 python=3.8 -y; \
conda create -n py39 python=3.9 -y; \
conda create -n py310 python=3.10 -y; \
conda create -n py311 python=3.11 -y; \
conda create -n py312 python=3.12 -y; \
ln -sf /miniconda3/envs/py38/bin/python3.8 /usr/bin/python3.8; \
ln -sf /miniconda3/envs/py39/bin/python3.9 /usr/bin/python3.9; \
ln -sf /miniconda3/envs/py310/bin/python3.10 /usr/bin/python3.10; \
ln -sf /miniconda3/envs/py311/bin/python3.11 /usr/bin/python3.11; \
ln -sf /miniconda3/envs/py312/bin/python3.12 /usr/bin/python3.12; \
conda install -y cmake patchelf
WORKDIR /tilelang
FROM pytorch/manylinux-builder:cuda12.1
FROM pytorch/manylinux2_28-builder:cuda12.1 AS builder_amd64
ENV CUDA_VERSION=12.1 \
AUDITWHEEL_PLAT=manylinux_2_28_x86_64
RUN pip3 install uv
FROM pytorch/manylinuxaarch64-builder:cuda12.8 AS builder_arm64
ENV CUDA_VERSION=12.8 \
AUDITWHEEL_PLAT=manylinux_2_28_aarch64
FROM builder_${TARGETARCH}
ENV DEBIAN_FRONTEND=noninteractive \
TZ=Etc/UTC
RUN set -eux; \
yum -y update && yum install -y \
zlib-devel openssl-devel \
libedit-devel libxml2-devel \
bzip2 bzip2-devel xz xz-devel \
epel-release
uv venv -p 3.12 --seed /venv; \
git config --global --add safe.directory '/tilelang'
RUN set -eux; \
conda create -n py38 python=3.8 -y && \
conda create -n py39 python=3.9 -y && \
conda create -n py310 python=3.10 -y && \
conda create -n py311 python=3.11 -y && \
conda create -n py312 python=3.12 -y && \
ln -sf /opt/conda/envs/py38/bin/python3.8 /usr/bin/python3.8 && \
ln -sf /opt/conda/envs/py39/bin/python3.9 /usr/bin/python3.9 && \
ln -sf /opt/conda/envs/py310/bin/python3.10 /usr/bin/python3.10 && \
ln -sf /opt/conda/envs/py311/bin/python3.11 /usr/bin/python3.11 && \
ln -sf /opt/conda/envs/py312/bin/python3.12 /usr/bin/python3.12 && \
conda install -y cmake patchelf
ENV PATH="/venv/bin:$PATH" \
VIRTUAL_ENV=/venv
RUN uv pip install build wheel
WORKDIR /tilelang
# if dist and build directories exist, remove them
if [ -d dist ]; then
rm -r dist
fi
set -eux
if [ -d build ]; then
rm -r build
fi
rm -rf dist
PYPI_BUILD=TRUE WITH_COMMITID=FALSE python setup.py bdist_wheel --plat-name=manylinux1_x86_64
python -mpip install -U pip
python -mpip install -U build wheel auditwheel patchelf
export NO_VERSION_LABEL=1
python -m build --sdist -o dist
python -m build --wheel -o raw_dist
auditwheel repair -L /lib -w dist \
--exclude libcuda.so.1 --exclude /usr/local/cuda\* --exclude /opt/amdgpu\* \
--exclude /opt/rocm\* \
raw_dist/*.whl
echo "Wheel built successfully."