Unverified Commit d89ba5b8 authored by Yichen Yan's avatar Yichen Yan Committed by GitHub
Browse files

[Build] Migrate to scikit-build-core (#939)



* cleanup

* init

* build first wheel that may not work

* build cython ext

* fix tvm build

* use sabi

* update rpath to support auditwheel

* pass editable build

* update ci

* fix warnings

* do not use ccache in self host runner

* test local uv cache

* test pip index

* update lib search to respect new lib location

* fix

* update ci

* enable cuda by default

* update src map

* fix

* fix

* fix

* Generate version with backend and git information at build time

* copy tvm_cython to wheels

* fix tvm lib search

* fmt

* remove unused

* auto detect ccache

* add back backend-related files

* remove jit cython adaptor to simplify code

* fmt

* fix ci

* ci fix 2

* ci fix 3

* workaround metal

* ci fix 4

* fmt

* fmt

* Revert "ci fix 4"

This reverts commit d1de8291c3e40927955f3ad3cf87a75c78813676.

* tmp

* fix metal

* trivial cleanup

* add detailed build-time version for cuda

* add back mlc

* Restore wheel info and other trivial updates

* update

* fix cuda

* upd

* fix metal ci

* test for ga build

* test for nvidia/cuda

* test ubuntu 20

* fix

* fix

* Do not use `uv build`

* fix

* fix

* log toolchain version

* merge wheel

* update

* debug

* fix

* update

* skip rocm

* update artifacts each

* fix

* fix

* add mac

* fix cache

* fix cache

* fix cache

* reset and add comment

* upd

* fix git version

* update deps

* trivial update

* use in-tree build dir and install to src to speedup editable build

* Revert "use in-tree build dir and install to src to speedup editable build"

This reverts commit 6ab87b05c5eed811210136b8dca4fc3677dd51f2.

* add build-dir

* update docs

* remove old scripts

* [1/n] cleanup scripts

* [Lint]: [pre-commit.ci] auto fixes [...]

* fix and update

* wait for tvm fix

* revert some tmp fix

* fix

* fix

* spell

* doc update

* test cibuildwheel

* fix and test macos on ci

* Update .github/workflows/dist.yml
Co-authored-by: default avatarXuehai Pan <XuehaiPan@outlook.com>

* fix

* test ga event

* cleanup

* bump tvm to support api3

* test final version

* add cron

* Update .github/workflows/dist.yml
Co-authored-by: default avatarXuehai Pan <XuehaiPan@outlook.com>

* fix

* test ccache for metal cibuildwheel

* test newer macos

* finish

---------
Co-authored-by: default avatarpre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: default avatarXuehai Pan <XuehaiPan@outlook.com>
parent bab57f23
# Abort on the first failing command (apt-get/pip failures were silently
# ignored before, producing wheels from a half-installed toolchain).
set -e

# Install every supported CPython version.
multi_python_version=("3.8" "3.9" "3.10" "3.11" "3.12")
for python_version in "${multi_python_version[@]}"; do
    echo "Installing Python ${python_version}..."
    apt-get install -y "python${python_version}"
done

pip install -r requirements-build.txt

# Remove stale build output so old wheels are never re-shipped.
# -f makes this a no-op when dist/ does not exist.
rm -rf dist

# Build source distribution (disabled for now)
# python setup.py sdist --formats=gztar,zip

# Build wheels for different Python versions.
# Test tox's exit status directly instead of the race-prone `$?` check.
echo "Building wheels for multiple Python versions..."
if ! tox -e py38-pypi,py39-pypi,py310-pypi,py311-pypi,py312-pypi; then
    echo "Error: Failed to build the wheels."
    exit 1
fi
echo "Wheels built successfully."
\ No newline at end of file
[project]
name = "tilelang"
authors = [{name = "Tile-AI"}]
maintainers = [{name = "Lei Wang", email = "leiwang1999@outlook.com"}]
description = "A tile level programming language to generate high performance code."
readme.file = "README.md"
license = "MIT"
keywords = ["BLAS", "CUDA", "HIP", "Code Generation", "TVM"]
classifiers = [
"Environment :: GPU",
"Operating System :: POSIX :: Linux",
"Operating System :: OS Independent",
"Operating System :: MacOS",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
readme.content-type = "text/markdown"
requires-python = ">=3.8"
dynamic = ["version"]
# Somehow this does not work, hard-code for now
# dynamic = ["version", "dependencies"]
# [tool.setuptools.dynamic]
# dependencies = {file = ["requirements.txt"]}
dependencies = [
"numpy>=1.23.5",
"tqdm>=4.62.3",
"typing_extensions>=4.10.0",
"cloudpickle",
"ml_dtypes",
"psutil",
"torch",
]
[project.optional-dependencies]
# ml_dtypes should be greater than 0.5.1
# if you want to enable fp4
fp4 = ["ml_dtypes>=0.5.1"]
[build-system]
requires = [
"build",
"cmake>=3.26",
"packaging",
"setuptools>=61",
"wheel",
"patchelf",
"setuptools>=63",
"Cython>=3.0.0",
"scikit-build-core",
]
build-backend = "setuptools.build_meta"
build-backend = "scikit_build_core.build"
[tool.scikit-build]
wheel.py-api = "cp38"
cmake.version = ">=3.26.1"
build-dir = "build"
# editable.rebuild = true
# Include backend and git info in version
metadata.version.provider = "version_provider"
metadata.version.provider-path = "."
experimental = true
[tool.scikit-build.wheel.packages]
tilelang = "tilelang"
"tilelang/src" = "src"
"tilelang/3rdparty" = "3rdparty"
# TODO: we might want to not include these in wheel?
"tilelang/benchmark" = "benchmark"
"tilelang/examples" = "examples"
"tilelang/testing" = "testing"
[tool.yapf]
based_on_style = "yapf"
......@@ -67,3 +132,50 @@ ignore = [
[tool.ruff.lint.per-file-ignores]
"3rdparty/**/*" = ["ALL"]
"examples/deepseek_v32/inference/**/*" = ["ALL"]
[tool.cibuildwheel]
archs = ["auto64"]
# wait for tvm fix
build = "cp38-*"
[tool.cibuildwheel.macos]
archs = ["arm64"]
[tool.cibuildwheel.linux]
# Pin to glibc 2.17 for x86 and 2.28 for aarch64 for now
manylinux-x86_64-image = "manylinux2014"
manylinux-aarch64-image = "manylinux_2_28"
skip = "*-musllinux*"
environment-pass = ["CUDA_VERSION"]
repair-wheel-command = [
"auditwheel repair --exclude libcuda.so.1 --exclude /usr/local/cuda\\* -w {dest_dir} {wheel}",
"pipx run abi3audit --strict --report {wheel}",
]
# Install CUDA runtime and stub driver library
# manylinux_2_28 uses gcc 14, which needs CUDA 12.8
before-all = """
set -eux
case "$(uname -m)" in
"x86_64")
yum-config-manager --add-repo https://developer.download.nvidia.cn/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
;;
"aarch64")
dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
;;
*)
exit 1
;;
esac
# Assume CUDA_VERSION=xx.y
v=${CUDA_VERSION:-12.4}
v=${v:0:4}
v=${v/./-}
yum install -y cuda-minimal-build-${v} cuda-driver-devel-${v} cuda-nvrtc-devel-${v}
"""
[tool.cibuildwheel.linux.environment]
# Equivalent to `source /opt/rh/gcc-toolset-12/enable`, safe when gcc-toolset-12 is not installed
PATH = "/usr/local/cuda/bin:$PATH"
# Should be mirrored in pyproject.toml
Cython>=3.0.0
build
cmake>=3.26
packaging
setuptools>=61
torch
wheel
tox
auditwheel
patchelf
ninja
# lint requirements
-r requirements-lint.txt
# build requirements
# Requirements to run local build with `--no-build-isolation` or other developments
Cython>=3.0.0
build
cmake>=3.26
# runtime requirements
cffi
cpplint
Cython
docutils
dtlib
numpy>=1.23.5
pytest>=6.2.4
pytest_xdist>=2.2.1
packaging>=21.0
PyYAML
tqdm>=4.62.3
typing_extensions>=4.10.0
requests
cloudpickle
ml_dtypes
psutil
scipy
packaging
setuptools>=61
torch
tabulate
wheel
setuptools
\ No newline at end of file
tox
ninja
auditwheel; platform_system == 'Linux'
patchelf; platform_system == 'Linux'
delocate; platform_system == 'Darwin'
# runtime requirements
Cython>=3.0.0
numpy>=1.23.5
tqdm>=4.62.3
typing_extensions>=4.10.0
cloudpickle
# ml_dtypes should be greater than 0.5.1
# if you want to enable fp4
ml_dtypes
psutil
torch
This diff is collapsed.
......@@ -5,6 +5,10 @@ import ctypes
import logging
from tqdm import tqdm
from importlib.metadata import version
__version__ = version('tilelang')
class TqdmLoggingHandler(logging.Handler):
"""Custom logging handler that directs log output to tqdm progress bar to avoid interference."""
......@@ -57,9 +61,10 @@ from .env import enable_cache, disable_cache, is_cache_enabled # noqa: F401
from .env import env as env # noqa: F401
import tvm
import tvm.base
import tvm.base # noqa: F401
from tvm import DataType # noqa: F401
# Setup tvm search path before importing tvm
from . import libinfo
......@@ -71,8 +76,8 @@ def _load_tile_lang_lib():
# pylint: disable=protected-access
lib_name = "tilelang" if tvm.base._RUNTIME_ONLY else "tilelang_module"
# pylint: enable=protected-access
lib_path = libinfo.find_lib_path(lib_name, optional=False)
return ctypes.CDLL(lib_path[0]), lib_path[0]
lib_path = libinfo.find_lib_path(lib_name)
return ctypes.CDLL(lib_path), lib_path
# only load once here
......@@ -101,8 +106,6 @@ from .transform import PassConfigKey # noqa: F401
from .engine import lower, register_cuda_postproc, register_hip_postproc # noqa: F401
from .version import __version__ # noqa: F401
from .math import * # noqa: F403
from . import ir # noqa: F401
......
......@@ -30,7 +30,7 @@ from tilelang.autotuner.param import CompileArgs, ProfileArgs, AutotuneResult
from tilelang.autotuner.capture import get_autotune_inputs
from tilelang.utils.target import determine_target
from tilelang.jit.param import _P, _RProg
from tilelang.version import __version__
from tilelang import __version__
class TimeoutException(Exception):
......
......@@ -16,7 +16,7 @@ from tvm.tir import PrimFunc
from tilelang.engine.param import KernelParam
from tilelang import env
from tilelang.jit import JITKernel
from tilelang.version import __version__
from tilelang import __version__
KERNEL_PATH = "kernel.cu"
WRAPPED_KERNEL_PATH = "wrapped_kernel.cu"
......
......@@ -4,6 +4,7 @@ import pathlib
import logging
import shutil
import glob
import site
from dataclasses import dataclass
from typing import Optional
......@@ -19,6 +20,19 @@ TL_TEMPLATE_NOT_FOUND_MESSAGE = ("TileLang is not installed or found in the expe
", which may lead to compilation bugs when utilize tilelang backend."
TVM_LIBRARY_NOT_FOUND_MESSAGE = ("TVM is not installed or found in the expected path")
SITE_PACKAGES = site.getsitepackages()
TL_LIBS = [os.path.join(i, 'tilelang/lib') for i in site.getsitepackages()]
TL_LIBS = [i for i in TL_LIBS if os.path.exists(i)]
TL_ROOT = os.path.dirname(os.path.abspath(__file__))
DEV = False
THIRD_PARTY_ROOT = os.path.join(TL_ROOT, '3rdparty')
if not os.path.exists(THIRD_PARTY_ROOT):
DEV = True
THIRD_PARTY_ROOT = os.path.join(TL_ROOT, '..', '3rdparty')
def _find_cuda_home() -> str:
"""Find the CUDA install path.
......@@ -261,85 +275,51 @@ env = Environment()
CUDA_HOME = env.CUDA_HOME
ROCM_HOME = env.ROCM_HOME
def prepend_pythonpath(path):
    """Put *path* at the front of both ``PYTHONPATH`` and ``sys.path``.

    Updating ``os.environ["PYTHONPATH"]`` propagates the path to child
    processes, while the ``sys.path`` insert makes it visible to the
    current interpreter immediately.
    """
    existing = os.environ.get("PYTHONPATH", None)
    os.environ["PYTHONPATH"] = path if not existing else path + os.pathsep + existing
    sys.path.insert(0, path)
# Initialize TVM paths
if env.TVM_IMPORT_PYTHON_PATH is not None:
os.environ["PYTHONPATH"] = env.TVM_IMPORT_PYTHON_PATH + ":" + os.environ.get("PYTHONPATH", "")
sys.path.insert(0, env.TVM_IMPORT_PYTHON_PATH)
prepend_pythonpath(env.TVM_IMPORT_PYTHON_PATH)
else:
install_tvm_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "3rdparty", "tvm")
if os.path.exists(install_tvm_path) and install_tvm_path not in sys.path:
os.environ["PYTHONPATH"] = (
install_tvm_path + "/python:" + os.environ.get("PYTHONPATH", ""))
sys.path.insert(0, install_tvm_path + "/python")
env.TVM_IMPORT_PYTHON_PATH = install_tvm_path + "/python"
develop_tvm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "3rdparty", "tvm")
if os.path.exists(develop_tvm_path) and develop_tvm_path not in sys.path:
os.environ["PYTHONPATH"] = (
develop_tvm_path + "/python:" + os.environ.get("PYTHONPATH", ""))
sys.path.insert(0, develop_tvm_path + "/python")
env.TVM_IMPORT_PYTHON_PATH = develop_tvm_path + "/python"
develop_tvm_library_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "build", "tvm")
install_tvm_library_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lib")
tvm_path = os.path.join(THIRD_PARTY_ROOT, "tvm")
assert os.path.exists(tvm_path), tvm_path
if tvm_path not in sys.path:
tvm_python_binding = os.path.join(tvm_path, 'python')
prepend_pythonpath(tvm_python_binding)
env.TVM_IMPORT_PYTHON_PATH = tvm_python_binding
if os.environ.get("TVM_LIBRARY_PATH") is None:
if os.path.exists(develop_tvm_library_path):
os.environ["TVM_LIBRARY_PATH"] = develop_tvm_library_path
elif os.path.exists(install_tvm_library_path):
os.environ["TVM_LIBRARY_PATH"] = install_tvm_library_path
else:
logger.warning(TVM_LIBRARY_NOT_FOUND_MESSAGE)
# pip install build library path
lib_path = os.path.join(env.TILELANG_PACKAGE_PATH, "lib")
existing_path = os.environ.get("TVM_LIBRARY_PATH")
if existing_path:
os.environ["TVM_LIBRARY_PATH"] = f"{existing_path}:{lib_path}"
else:
os.environ["TVM_LIBRARY_PATH"] = lib_path
env.TVM_LIBRARY_PATH = os.environ.get("TVM_LIBRARY_PATH", None)
os.environ['TVM_LIBRARY_PATH'] = env.TVM_LIBRARY_PATH = os.pathsep.join(TL_LIBS)
# Initialize CUTLASS paths
if os.environ.get("TL_CUTLASS_PATH", None) is None:
install_cutlass_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "3rdparty", "cutlass")
develop_cutlass_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "3rdparty", "cutlass")
if os.path.exists(install_cutlass_path):
os.environ["TL_CUTLASS_PATH"] = install_cutlass_path + "/include"
env.CUTLASS_INCLUDE_DIR = install_cutlass_path + "/include"
elif (os.path.exists(develop_cutlass_path) and develop_cutlass_path not in sys.path):
os.environ["TL_CUTLASS_PATH"] = develop_cutlass_path + "/include"
env.CUTLASS_INCLUDE_DIR = develop_cutlass_path + "/include"
cutlass_inc_path = os.path.join(THIRD_PARTY_ROOT, 'cutlass', 'include')
if os.path.exists(cutlass_inc_path):
os.environ["TL_CUTLASS_PATH"] = env.CUTLASS_INCLUDE_DIR = cutlass_inc_path
else:
logger.warning(CUTLASS_NOT_FOUND_MESSAGE)
# Initialize COMPOSABLE_KERNEL paths
if os.environ.get("TL_COMPOSABLE_KERNEL_PATH", None) is None:
install_ck_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "3rdparty", "composable_kernel")
develop_ck_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "3rdparty", "composable_kernel")
if os.path.exists(install_ck_path):
os.environ["TL_COMPOSABLE_KERNEL_PATH"] = install_ck_path + "/include"
env.COMPOSABLE_KERNEL_INCLUDE_DIR = install_ck_path + "/include"
elif (os.path.exists(develop_ck_path) and develop_ck_path not in sys.path):
os.environ["TL_COMPOSABLE_KERNEL_PATH"] = develop_ck_path + "/include"
env.COMPOSABLE_KERNEL_INCLUDE_DIR = develop_ck_path + "/include"
ck_inc_path = os.path.join(THIRD_PARTY_ROOT, 'composable_kernel', 'include')
if os.path.exists(ck_inc_path):
os.environ["TL_COMPOSABLE_KERNEL_PATH"] = env.COMPOSABLE_KERNEL_INCLUDE_DIR = ck_inc_path
else:
logger.warning(COMPOSABLE_KERNEL_NOT_FOUND_MESSAGE)
# Initialize TL_TEMPLATE_PATH
if os.environ.get("TL_TEMPLATE_PATH", None) is None:
install_tl_template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "src")
develop_tl_template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "src")
if os.path.exists(install_tl_template_path):
os.environ["TL_TEMPLATE_PATH"] = install_tl_template_path
env.TILELANG_TEMPLATE_PATH = install_tl_template_path
elif (os.path.exists(develop_tl_template_path) and develop_tl_template_path not in sys.path):
os.environ["TL_TEMPLATE_PATH"] = develop_tl_template_path
env.TILELANG_TEMPLATE_PATH = develop_tl_template_path
tl_template_path = os.path.join(THIRD_PARTY_ROOT, "..", "src")
if os.path.exists(tl_template_path):
os.environ["TL_TEMPLATE_PATH"] = env.TILELANG_TEMPLATE_PATH = tl_template_path
else:
logger.warning(TL_TEMPLATE_NOT_FOUND_MESSAGE)
......
"""The profiler and convert to torch utils"""
import ctypes
import fcntl
import hashlib
import logging
import site
import sys
import sysconfig
import torch
import os
from pathlib import Path
from typing import List, Optional, Union, Callable, Dict, Tuple, Any
from tilelang import tvm as tvm
......@@ -25,155 +18,15 @@ from tilelang.jit.adapter.utils import is_cuda_target, is_hip_target, is_cpu_tar
from tilelang.utils.target import determine_target
from tilelang.utils.language import retrieve_func_from_module
from tilelang.utils.tensor import map_torch_type
from tilelang.contrib.cc import get_cplus_compiler, is_darwin
logger = logging.getLogger(__name__)
def get_cython_compiler() -> Optional[str]:
    """Return the path to the Cython compiler.

    Searches, in priority order: the active virtual environment's ``bin``
    directory (if any), the user site-packages ``bin`` directory, then
    every directory on the executable search path, for an executable
    named ``cython`` or ``cython3``.

    Returns
    -------
    out: Optional[str]
        The path to the Cython compiler, or None if none was found.
    """
    search_dirs = list(os.get_exec_path())

    # User site-packages bin directory takes precedence over PATH.
    user_base = site.getuserbase()
    if user_base:
        user_bin = os.path.join(user_base, "bin")
        if os.path.exists(user_bin):
            search_dirs.insert(0, user_bin)

    # An active virtual environment's bin directory has highest priority.
    if sys.prefix != sys.base_prefix:
        venv_bin = os.path.join(sys.prefix, "bin")
        if os.path.exists(venv_bin):
            search_dirs.insert(0, venv_bin)

    # Try all directories for "cython" before falling back to "cython3".
    for executable in ("cython", "cython3"):
        for directory in search_dirs:
            candidate = os.path.join(directory, executable)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
    return None
# Add cache management functions at module level
def get_cache_dir() -> Path:
    """Return (creating it if needed) the per-Python-version cache directory.

    The cache lives in ``.cycache/pyXY`` next to this module so that
    artifacts built for different interpreter versions never collide.
    """
    tag = "py%d%d" % (sys.version_info.major, sys.version_info.minor)
    module_dir = Path(os.path.dirname(os.path.abspath(__file__)))
    cache_dir = module_dir / ".cycache" / tag
    cache_dir.mkdir(parents=True, exist_ok=True)
    return cache_dir
def get_cached_lib(source_code: str) -> Tuple[Optional[ctypes.CDLL], Path]:
    """Look up a previously compiled shared library for *source_code*.

    The cache key is the SHA-256 hex digest of the source text.  An
    exclusive ``flock`` on a sibling ``.lock`` file serializes concurrent
    builders racing on the same cache entry.  Cached files of 1024 bytes
    or less are treated as truncated build artifacts and removed.

    Returns a ``(handle, path)`` pair; ``handle`` is ``None`` on a miss.
    """
    digest = hashlib.sha256(source_code.encode()).hexdigest()
    cache_path = get_cache_dir() / (digest + ".so")
    lock_path = cache_path.with_suffix('.lock')
    with open(lock_path, 'w') as lock:
        fcntl.flock(lock.fileno(), fcntl.LOCK_EX)
        try:
            if cache_path.exists():
                try:
                    if cache_path.stat().st_size > 1024:
                        return ctypes.CDLL(str(cache_path)), cache_path
                    # Anything this small is an incomplete build; drop it.
                    cache_path.unlink()
                except Exception as e:
                    logger.error(f"Failed to load cached library: {e}")
                    return None, cache_path
            return None, cache_path
        finally:
            fcntl.flock(lock.fileno(), fcntl.LOCK_UN)
# read the cython_wrapper.pyx file
current_dir = os.path.dirname(os.path.abspath(__file__))
cython_wrapper_path = os.path.join(current_dir, "cython_wrapper.pyx")
with open(cython_wrapper_path, "r") as f:
cython_wrapper_code = f.read()
cache_dir = get_cache_dir()
source_path = cache_dir / "cython_wrapper.cpp"
library_path = cache_dir / "cython_wrapper.so"
md5_path = cache_dir / "md5.txt"
code_hash = hashlib.sha256(cython_wrapper_code.encode()).hexdigest()
cache_path = cache_dir / f"{code_hash}.so"
lock_file = cache_path.with_suffix('.lock')
# Check if cached version exists and is valid
need_compile = True
if md5_path.exists() and library_path.exists():
with open(md5_path, "r") as f:
cached_hash = f.read().strip()
if cached_hash == code_hash:
logger.debug("Cython JIT adapter is up to date, no need to compile...")
need_compile = False
else:
logger.info("Cython JIT adapter is out of date, need to recompile...")
else:
logger.info("No cached version found for Cython JIT adapter, need to compile...")
if need_compile:
logger.info("Waiting for lock to compile Cython JIT adapter...")
with open(lock_file, 'w') as lock:
fcntl.flock(lock.fileno(), fcntl.LOCK_EX)
try:
# After acquiring the lock, check again if the file has been compiled by another process
if md5_path.exists() and library_path.exists():
with open(md5_path, "r") as f:
cached_hash = f.read().strip()
if cached_hash == code_hash:
logger.info(
"Another process has already compiled the file, using it...")
need_compile = False
if need_compile:
logger.info("Compiling Cython JIT adapter...")
temp_path = cache_dir / f"temp_{code_hash}.so"
with open(md5_path, "w") as f:
f.write(code_hash)
# compile the cython_wrapper.pyx file into .cpp
cython = get_cython_compiler()
if cython is None:
raise Exception("Cython is not installed, please install it first.")
os.system(f"{cython} {cython_wrapper_path} --cplus -o {source_path}")
python_include_path = sysconfig.get_path("include")
cc = get_cplus_compiler()
dynamic_flag = '-Wl,-undefined,dynamic_lookup' if is_darwin(
) else '-Wl,--unresolved-symbols=ignore-all'
command = f"{cc} -shared -pthread -fPIC -fwrapv -O2 -Wall -fno-strict-aliasing {dynamic_flag} -I{python_include_path} {source_path} -o {temp_path}"
os.system(command)
# rename the temp file to the library file
temp_path.rename(library_path)
except Exception as e:
if 'temp_path' in locals() and temp_path.exists():
temp_path.unlink()
raise Exception(f"Failed to compile Cython JIT adapter: {e}") from e
finally:
if lock_file.exists():
lock_file.unlink()
# add the .so file to the sys.path
cache_dir_str = str(cache_dir)
if cache_dir_str not in sys.path:
sys.path.append(cache_dir_str)
from cython_wrapper import CythonKernelWrapper
try:
# Load cython_wrapper.api3.so in env.py
from cython_wrapper import CythonKernelWrapper
except ImportError:
# TODO: tolerance a build without cython backend
raise
class CythonKernelAdapter(BaseKernelAdapter):
......
"""Library information. This is a standalone file that can be used to get various info.
Modified from: https://github.com/mlc-ai/mlc-llm/blob/main/python/mlc_llm/libinfo.py
"""
#! pylint: disable=protected-access
import os
import sys
import os
TILELANG_LIBRARY_PATH = os.environ.get("TILELANG_LIBRARY_PATH", None)
def get_env_paths(env_var, splitter):
    """Split the value of *env_var* on *splitter* into stripped path entries.

    An unset or empty variable yields an empty list.
    """
    raw = os.environ.get(env_var, None)
    if not raw:
        return []
    return [entry.strip() for entry in raw.split(splitter)]
from .env import TL_LIBS
def get_dll_directories():
    """Collect candidate directories that may hold the tilelang shared library.

    Candidates, in priority order: this package directory, the in-tree
    build outputs, the wheel ``lib`` directory, ``TILELANG_LIBRARY_PATH``,
    the active conda prefix, and finally the platform's dynamic-loader
    search path.  Only directories that actually exist are returned.
    """
    curr_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    source_dir = os.path.abspath(os.path.join(curr_dir, ".."))

    candidates = [curr_dir]
    candidates.append(os.path.join(source_dir, "build"))  # local build
    candidates.append(os.path.join(source_dir, "build", "Release"))
    candidates.append(os.path.join(curr_dir, "lib"))  # pypi build

    if TILELANG_LIBRARY_PATH:
        candidates.append(TILELANG_LIBRARY_PATH)
    if "CONDA_PREFIX" in os.environ:
        candidates.append(os.path.join(os.environ["CONDA_PREFIX"], "lib"))

    # Fold in the OS dynamic-loader search path for the current platform.
    platform_name = sys.platform
    if platform_name.startswith("linux") or platform_name.startswith("freebsd"):
        candidates += get_env_paths("LD_LIBRARY_PATH", ":")
    elif platform_name.startswith("darwin"):
        candidates += get_env_paths("DYLD_LIBRARY_PATH", ":")
    elif platform_name.startswith("win32"):
        candidates += get_env_paths("PATH", ";")

    return [os.path.abspath(p) for p in candidates if os.path.isdir(p)]
def find_lib_path(name, optional=False):
def find_lib_path(name: str, py_ext=False):
"""Find tile lang library
Parameters
......@@ -50,7 +15,9 @@ def find_lib_path(name, optional=False):
optional: boolean
Whether the library is required
"""
if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
if py_ext:
lib_name = f"{name}.abi3.so"
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
lib_name = f"lib{name}.so"
elif sys.platform.startswith("win32"):
lib_name = f"{name}.dll"
......@@ -59,11 +26,11 @@ def find_lib_path(name, optional=False):
else:
lib_name = f"lib{name}.so"
dll_paths = get_dll_directories()
lib_dll_path = [os.path.join(p, lib_name) for p in dll_paths]
lib_found = [p for p in lib_dll_path if os.path.exists(p) and os.path.isfile(p)]
if not lib_found and not optional:
for lib_root in TL_LIBS:
lib_dll_path = os.path.join(lib_root, lib_name)
if os.path.exists(lib_dll_path) and os.path.isfile(lib_dll_path):
return lib_dll_path
else:
message = (f"Cannot find libraries: {lib_name}\n" + "List of candidates:\n" +
"\n".join(lib_dll_path))
"\n".join(TL_LIBS))
raise RuntimeError(message)
return lib_found
import os
import subprocess
from typing import Union
# Get the absolute path of the current Python script's directory
current_dir = os.path.dirname(os.path.abspath(__file__))

# Get the absolute path of the project root directory (one level above the current directory)
develop_project_root_dir = os.path.abspath(os.path.join(current_dir, ".."))
installed_project_root_dir = os.path.abspath(os.path.join(current_dir))

# Define the path to the VERSION file located in the project root directory
develop_version_file_path = os.path.join(develop_project_root_dir, "VERSION")
installed_version_file_path = os.path.join(installed_project_root_dir, "VERSION")

# Prefer the in-repo (development checkout) VERSION file; fall back to the
# copy shipped inside the installed package.  Failing both is a packaging
# error, so raise rather than guess a version.
if os.path.exists(develop_version_file_path):
    version_file_path = develop_version_file_path
elif os.path.exists(installed_version_file_path):
    version_file_path = installed_version_file_path
else:
    raise FileNotFoundError("VERSION file not found in the project root directory")

# Read and store the version information from the VERSION file
# Use 'strip()' to remove any leading/trailing whitespace or newline characters
with open(version_file_path, "r") as version_file:
    __version__ = version_file.read().strip()
def get_git_commit_id() -> Union[str, None]:
    """Return the current git HEAD hash, or None if it cannot be determined.

    git is invoked in this file's directory; any failure (not inside a
    repository, or git not installed at all) is swallowed and reported
    as None.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    try:
        output = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                         cwd=here,
                                         stderr=subprocess.DEVNULL,
                                         encoding='utf-8')
    # FileNotFoundError covers a missing git executable.
    except (subprocess.SubprocessError, FileNotFoundError):
        return None
    return output.strip()
# Append git commit hash to version if not already present
# NOTE(lei): Although the local commit id cannot capture locally staged changes,
# the local commit id can help mitigate issues caused by incorrect cache to some extent,
# so it should still be kept.
# Check WITH_COMMITID environment variable to control whether to include commit ID
# (any value other than "true", case-insensitive, disables it; default is enabled).
WITH_COMMITID = os.environ.get("WITH_COMMITID", "True").lower() == "true"
# The "+" check skips versions that already carry a local-version label.
if WITH_COMMITID and "+" not in __version__ and (commit_id := get_git_commit_id()):
    # Use short commit ID (8 characters) for better compatibility
    short_commit_id = commit_id[:8]
    __version__ = f"{__version__}+{short_commit_id}"

# Define the public API for the module
__all__ = ["__version__"]
[tox]
envlist = py38,py39,py310,py311,py312
isolated_build = False
[testenv:py{38,39,310,311,312}]
skip_install = false
deps =
wheel
build
setenv =
WITH_COMMITID = TRUE
PYTHON_EXECUTABLE = {envpython}
Python3_EXECUTABLE = {envpython}
commands =
python -m build --wheel -o {toxinidir}/dist
[testenv:py{38,39,310,311,312}-pypi]
skip_install = false
setenv =
PYPI_BUILD = TRUE
WITH_COMMITID = FALSE
PYTHON_EXECUTABLE = {envpython}
Python3_EXECUTABLE = {envpython}
commands =
python setup.py bdist_wheel --plat-name=manylinux2014_x86_64
[testenv:audit_manylinux2014]
skip_install = true
allowlist_externals =
bash
deps =
auditwheel
patchelf
commands =
bash -c 'auditwheel repair -L=/lib --exclude=/usr/local/cuda* --exclude=libcuda.so.1 --plat=manylinux2014_x86_64 dist/*'
[testenv:py38]
basepython = python3.8
[testenv:py39]
basepython = python3.9
[testenv:py310]
basepython = python3.10
[testenv:py311]
basepython = python3.11
[testenv:py312]
basepython = python3.12
from __future__ import annotations
import os
import platform
import subprocess
from typing import Optional
from pathlib import Path
ROOT = Path(__file__).parent
base_version = (ROOT / 'VERSION').read_text().strip()
def _read_cmake_bool(i: str | None, default=False):
if i is None:
return default
return i.lower() not in ('0', 'false', 'off', 'no', 'n', '')
def get_git_commit_id() -> Optional[str]:
    """Return the git HEAD hash of the source tree.

    Runs ``git rev-parse HEAD`` in ROOT.  On any git failure the literal
    string ``'unknown'`` is returned (not None, despite the annotation),
    which keeps a truthy value flowing into the version label.
    """
    proc = subprocess.run(['git', 'rev-parse', 'HEAD'],
                          cwd=ROOT,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          encoding='utf-8')
    return proc.stdout.strip() if proc.returncode == 0 else 'unknown'
def dynamic_metadata(
    field: str,
    settings: dict[str, object] | None = None,
) -> str:
    """Dynamic-metadata hook: compute the package version string at build time.

    Starts from ``base_version`` (the VERSION file contents) and, unless
    NO_VERSION_LABEL is set, appends a local-version label of the form
    ``+<backend>.git<hash>`` describing the build toolchain and source
    revision.  Only the ``version`` field is supported.
    """
    assert field == 'version'
    version = base_version
    # NO_VERSION_LABEL suppresses the entire "+..." local-version suffix.
    if not _read_cmake_bool(os.environ.get('NO_VERSION_LABEL')):
        exts = []
        backend = None
        # NO_TOOLCHAIN_VERSION suppresses only the backend component.
        if _read_cmake_bool(os.environ.get('NO_TOOLCHAIN_VERSION')):
            pass
        elif platform.system() == 'Darwin':
            # only on macosx_11_0_arm64, not necessary
            # backend = 'metal'
            pass
        elif _read_cmake_bool(os.environ.get('USE_ROCM', '')):
            backend = 'rocm'
        elif 'USE_CUDA' in os.environ and not _read_cmake_bool(os.environ.get('USE_CUDA')):
            # USE_CUDA explicitly disabled: a CPU-only build.
            backend = 'cpu'
        else:  # cuda
            # Read nvcc version from env.
            # This is not exactly how it should be,
            # but works for now if building in a nvidia/cuda image.
            if cuda_version := os.environ.get('CUDA_VERSION'):
                major, minor, *_ = cuda_version.split('.')
                backend = f'cu{major}{minor}'
            else:
                backend = 'cuda'
        if backend:
            exts.append(backend)
        # Git component.  Note get_git_commit_id() returns the truthy
        # string 'unknown' on failure, so a git label is still appended.
        if _read_cmake_bool(os.environ.get('NO_GIT_VERSION')):
            pass
        elif git_hash := get_git_commit_id():
            exts.append(f'git{git_hash[:8]}')
        if exts:
            version += '+' + '.'.join(exts)
    return version
__all__ = ["dynamic_metadata"]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment