from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

import torch

# Compiler flags.
CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
# TODO(woosuk): Should we use -O3?
NVCC_FLAGS = ["-O2", "-std=c++17", "--gpu-max-threads-per-block=1024"]

# Match the C++11 ABI setting of the installed torch build so the compiled
# extension links against libtorch without ABI mismatches.
ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]

extra_compile_args = {
    "cxx": CXX_FLAGS,
    "nvcc": NVCC_FLAGS,
}

# On CUDA (non-ROCm) builds, target compute capability 8.0 (A100-class GPUs).
# BUG FIX: the previous code called `.append()` on the dict itself, which
# raises AttributeError; `-arch` is an nvcc flag, so it goes in the nvcc list.
if not torch.version.hip:
    extra_compile_args["nvcc"].append("-arch=compute_80")
# The two fused-attention kernels share the same build configuration, so
# build their CUDAExtension entries from a (module name, source file) table.
ext_modules = [
    CUDAExtension(
        name=mod_name,
        sources=[src],
        extra_compile_args=extra_compile_args,
    )
    for mod_name, src in [
        (
            "custom_kernels.fused_bloom_attention_cuda",
            "custom_kernels/fused_bloom_attention_cuda.cu",
        ),
        (
            "custom_kernels.fused_attention_cuda",
            "custom_kernels/fused_attention_cuda.cu",
        ),
    ]
]

setup(
    name="custom_kernels",
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension},
)