from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import torch

# Compiler flags.
CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
# TODO(woosuk): Should we use -O3?
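# Note: --gpu-max-threads-per-block appears to be a hipcc (ROCm) option, so
# this flag list assumes a ROCm build of PyTorch rather than stock nvcc.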
NVCC_FLAGS = ["-O2", "-std=c++17", "--gpu-max-threads-per-block=1024"]

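# Match the C++11 ABI setting of the installed PyTorch so the extension links
# against libtorch without ABI mismatches.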
ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]

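# Per-compiler flags: "cxx" goes to the host C++ compiler, "nvcc" to the
# device compiler.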
extra_compile_args = {
    "cxx": CXX_FLAGS,
    "nvcc": NVCC_FLAGS,
}
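
# Build the exllamav2_kernels extension; torch's BuildExtension routes the
# per-compiler entries in extra_compile_args to the matching compiler.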
setup(
    name="exllamav2_kernels",
    ext_modules=[
        CUDAExtension(
            name="exllamav2_kernels",
            sources=[
                "exllamav2_kernels/ext.cpp",
                "exllamav2_kernels/cuda/q_matrix.cu",
                "exllamav2_kernels/cuda/q_gemm.cu",
            ],
            extra_compile_args=extra_compile_args,
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
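
# A minimal build sketch (assuming a CUDA- or ROCm-enabled PyTorch install):
#   pip install -v .                       # build and install the extension
#   python setup.py build_ext --inplace    # or build in place for development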