setup.py
import setuptools
import torch
from torch.utils import cpp_extension

# Compilation flags for the host C++ compiler and for NVCC, respectively.
CXX_FLAGS = ['-g']
NVCC_FLAGS = ['-O2']

# Building needs both a usable CUDA device/driver (checked here) and the CUDA
# toolkit at cpp_extension.CUDA_HOME, which is used to compile the kernels.
if not torch.cuda.is_available():
    raise RuntimeError(
        f'Cannot find CUDA at CUDA_HOME: {cpp_extension.CUDA_HOME}. '
        'CUDA must be available in order to build the package.')

# FIXME(woosuk): Consider the case where the machine has multiple GPUs with
# different compute capabilities.
compute_capability = torch.cuda.get_device_capability()
major, minor = compute_capability
# Enable bfloat16 support if the compute capability is >= 8.0.
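# (Compute capability 8.0 is NVIDIA Ampere, e.g. the A100, which introduced
# hardware support for bfloat16.)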
if major >= 8:
    NVCC_FLAGS.append('-DENABLE_BF16')

ext_modules = []
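# Each CUDAExtension is compiled into a shared library that imports as a
# submodule of `cacheflow` (e.g. `from cacheflow import cache_ops`).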

# Cache operations.
cache_extension = cpp_extension.CUDAExtension(
    name='cacheflow.cache_ops',
    sources=['csrc/cache.cpp', 'csrc/cache_kernels.cu'],
    extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
)
ext_modules.append(cache_extension)

# Attention kernels.
attention_extension = cpp_extension.CUDAExtension(
    name='cacheflow.attention_ops',
    sources=['csrc/attention.cpp', 'csrc/attention/attention_kernels.cu'],
    extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
)
ext_modules.append(attention_extension)

# Positional encoding kernels.
positional_encoding_extension = cpp_extension.CUDAExtension(
    name='cacheflow.pos_encoding_ops',
    sources=['csrc/pos_encoding.cpp', 'csrc/pos_encoding_kernels.cu'],
    extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
)
ext_modules.append(positional_encoding_extension)

# Layer normalization kernels.
layernorm_extension = cpp_extension.CUDAExtension(
    name='cacheflow.layernorm_ops',
    sources=['csrc/layernorm.cpp', 'csrc/layernorm_kernels.cu'],
    extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
)
ext_modules.append(layernorm_extension)

# Activation kernels.
activation_extension = cpp_extension.CUDAExtension(
    name='cacheflow.activation_ops',
    sources=['csrc/activation.cpp', 'csrc/activation_kernels.cu'],
    extra_compile_args={'cxx': CXX_FLAGS, 'nvcc': NVCC_FLAGS},
)
ext_modules.append(activation_extension)

setuptools.setup(
    name='cacheflow',
    ext_modules=ext_modules,
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
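
# A minimal usage sketch (standard setuptools workflow, not specific to this
# file): `pip install -e .` installs the package in editable mode, while
# `python setup.py build_ext --inplace` compiles the extensions into the
# source tree for local development.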