from typing import List

import setuptools
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME

# Build custom operators.
CXX_FLAGS = ["-g"]
# TODO(woosuk): Should we use -O3?
NVCC_FLAGS = ["-O2"]

# Building the extensions requires the CUDA toolkit; torch sets CUDA_HOME to
# None when it cannot locate one.
if CUDA_HOME is None:
    raise RuntimeError(
        "Cannot find CUDA_HOME. "
        "CUDA must be available in order to build the package.")

# FIXME(woosuk): Consider the case where the machine has multiple GPUs with
# different compute capabilities.
compute_capability = torch.cuda.get_device_capability()
major, minor = compute_capability
# Enable bfloat16 support if the compute capability is >= 8.0.
if major >= 8:
    NVCC_FLAGS.append("-DENABLE_BF16")
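
# Illustration only (an assumption, not part of the original build script):
# instead of relying on torch's default architecture detection, the detected
# capability could be pinned explicitly via nvcc's -gencode flag, e.g.:
#   NVCC_FLAGS.append(
#       f"-gencode=arch=compute_{major}{minor},code=sm_{major}{minor}")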

ext_modules = []

# Cache operations.
cache_extension = CUDAExtension(
    name="cacheflow.cache_ops",
    sources=["csrc/cache.cpp", "csrc/cache_kernels.cu"],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(cache_extension)

# Attention kernels.
attention_extension = CUDAExtension(
    name="cacheflow.attention_ops",
    sources=["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(attention_extension)

# Positional encoding kernels.
positional_encoding_extension = CUDAExtension(
    name="cacheflow.pos_encoding_ops",
    sources=["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(positional_encoding_extension)

# Layer normalization kernels.
layernorm_extension = CUDAExtension(
    name="cacheflow.layernorm_ops",
    sources=["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(layernorm_extension)

# Activation kernels.
activation_extension = CUDAExtension(
    name="cacheflow.activation_ops",
    sources=["csrc/activation.cpp", "csrc/activation_kernels.cu"],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(activation_extension)


def get_requirements() -> List[str]:
    """Get Python package dependencies from requirements.txt."""
    with open("requirements.txt") as f:
        requirements = f.read().strip().split("\n")
    return requirements


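# For illustration, a hypothetical requirements.txt containing one dependency
# per line, e.g.
#     torch
#     ninja
# would make get_requirements() return ["torch", "ninja"]. The actual pinned
# dependencies live in the repository's requirements.txt.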
setuptools.setup(
    name="cacheflow",
    python_requires=">=3.8",
    install_requires=get_requirements(),
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension},
)
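
# Typical usage (a sketch, not prescribed by this file): build the CUDA
# extensions and install the package in editable mode with
#     pip install -e .
# or compile the extensions in place with
#     python setup.py build_ext --inplace
# After a successful build, the ops are importable, e.g.:
#     from cacheflow import attention_ops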