setup.py 9.15 KB
Newer Older
1
2
3
import io
import os
import re
4
5
import subprocess
from typing import List, Set
6
import warnings
7

8
from packaging.version import parse, Version
Woosuk Kwon's avatar
Woosuk Kwon committed
9
import setuptools
Woosuk Kwon's avatar
Woosuk Kwon committed
10
import torch
11
12
13
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME

ROOT_DIR = os.path.dirname(__file__)
14

15
16
17
# Supported NVIDIA GPU architectures.
SUPPORTED_ARCHS = ["7.0", "7.5", "8.0", "8.6", "8.9", "9.0"]

18
# Compiler flags.
19
CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
20
# TODO(woosuk): Should we use -O3?
21
NVCC_FLAGS = ["-O2", "-std=c++17"]
Woosuk Kwon's avatar
Woosuk Kwon committed
22

23
24
25
ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
26

Cody Yu's avatar
Cody Yu committed
27
if CUDA_HOME is None:
Woosuk Kwon's avatar
Woosuk Kwon committed
28
    raise RuntimeError(
Woosuk Kwon's avatar
Woosuk Kwon committed
29
        "Cannot find CUDA_HOME. CUDA must be available to build the package.")
Woosuk Kwon's avatar
Woosuk Kwon committed
30

31
32
33
34
35
36
37
38
39
40
41
42
43
44

def get_nvcc_cuda_version(cuda_dir: str) -> Version:
    """Get the CUDA version from nvcc.

    Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
    """
    nvcc_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                          universal_newlines=True)
    output = nvcc_output.split()
    release_idx = output.index("release") + 1
    nvcc_cuda_version = parse(output[release_idx].split(",")[0])
    return nvcc_cuda_version


45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
def get_torch_arch_list() -> Set[str]:
    # TORCH_CUDA_ARCH_LIST can have one or more architectures,
    # e.g. "8.0" or "7.5,8.0,8.6+PTX". Here, the "8.6+PTX" option asks the
    # compiler to additionally include PTX code that can be runtime-compiled
    # and executed on the 8.6 or newer architectures. While the PTX code will
    # not give the best performance on the newer architectures, it provides
    # forward compatibility.
    valid_arch_strs = SUPPORTED_ARCHS + [s + "+PTX" for s in SUPPORTED_ARCHS]
    arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
    if arch_list is None:
        return set()

    # List are separated by ; or space.
    arch_list = arch_list.replace(" ", ";").split(";")
    for arch in arch_list:
        if arch not in valid_arch_strs:
            raise ValueError(
                f"Unsupported CUDA arch ({arch}). "
                f"Valid CUDA arch strings are: {valid_arch_strs}.")
    return set(arch_list)


# First, check the TORCH_CUDA_ARCH_LIST environment variable.
compute_capabilities = get_torch_arch_list()
if not compute_capabilities:
    # If TORCH_CUDA_ARCH_LIST is not defined or empty, target all available
    # GPUs on the current machine.
    device_count = torch.cuda.device_count()
    for i in range(device_count):
        major, minor = torch.cuda.get_device_capability(i)
        if major < 7:
            raise RuntimeError(
                "GPUs with compute capability below 7.0 are not supported.")
        compute_capabilities.add(f"{major}.{minor}")
79
80

nvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)
81
82
83
84
85
86
87
88
89
90
91
if not compute_capabilities:
    # If no GPU is specified nor available, add all supported architectures
    # based on the NVCC CUDA version.
    compute_capabilities = set(SUPPORTED_ARCHS)
    if nvcc_cuda_version < Version("11.1"):
        compute_capabilities.remove("8.6")
    if nvcc_cuda_version < Version("11.8"):
        compute_capabilities.remove("8.9")
        compute_capabilities.remove("9.0")

# Validate the NVCC CUDA version.
92
93
if nvcc_cuda_version < Version("11.0"):
    raise RuntimeError("CUDA 11.0 or higher is required to build the package.")
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
if nvcc_cuda_version < Version("11.1"):
    if any(cc.startswith("8.6") for cc in compute_capabilities):
        raise RuntimeError(
            "CUDA 11.1 or higher is required for compute capability 8.6.")
if nvcc_cuda_version < Version("11.8"):
    if any(cc.startswith("8.9") for cc in compute_capabilities):
        # CUDA 11.8 is required to generate the code targeting compute capability 8.9.
        # However, GPUs with compute capability 8.9 can also run the code generated by
        # the previous versions of CUDA 11 and targeting compute capability 8.0.
        # Therefore, if CUDA 11.8 is not available, we target compute capability 8.0
        # instead of 8.9.
        warnings.warn(
            "CUDA 11.8 or higher is required for compute capability 8.9. "
            "Targeting compute capability 8.0 instead.")
        compute_capabilities = set(cc for cc in compute_capabilities
                                   if not cc.startswith("8.9"))
        compute_capabilities.add("8.0+PTX")
    if any(cc.startswith("9.0") for cc in compute_capabilities):
        raise RuntimeError(
            "CUDA 11.8 or higher is required for compute capability 9.0.")
114
115
116

# Add target compute capabilities to NVCC flags.
for capability in compute_capabilities:
117
118
119
120
    num = capability[0] + capability[2]
    NVCC_FLAGS += ["-gencode", f"arch=compute_{num},code=sm_{num}"]
    if capability.endswith("+PTX"):
        NVCC_FLAGS += ["-gencode", f"arch=compute_{num},code=compute_{num}"]
121

Woosuk Kwon's avatar
Woosuk Kwon committed
122
123
124
125
126
# Use NVCC threads to parallelize the build.
if nvcc_cuda_version >= Version("11.2"):
    num_threads = min(os.cpu_count(), 8)
    NVCC_FLAGS += ["--threads", str(num_threads)]

Woosuk Kwon's avatar
Woosuk Kwon committed
127
128
129
ext_modules = []

# Cache operations.
130
cache_extension = CUDAExtension(
Woosuk Kwon's avatar
Woosuk Kwon committed
131
    name="vllm.cache_ops",
132
    sources=["csrc/cache.cpp", "csrc/cache_kernels.cu"],
Woosuk Kwon's avatar
Woosuk Kwon committed
133
134
135
136
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
Woosuk Kwon's avatar
Woosuk Kwon committed
137
138
139
)
ext_modules.append(cache_extension)

140
# Attention kernels.
141
attention_extension = CUDAExtension(
Woosuk Kwon's avatar
Woosuk Kwon committed
142
    name="vllm.attention_ops",
143
    sources=["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"],
Woosuk Kwon's avatar
Woosuk Kwon committed
144
145
146
147
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
148
149
150
)
ext_modules.append(attention_extension)

Woosuk Kwon's avatar
Woosuk Kwon committed
151
# Positional encoding kernels.
152
positional_encoding_extension = CUDAExtension(
Woosuk Kwon's avatar
Woosuk Kwon committed
153
    name="vllm.pos_encoding_ops",
154
    sources=["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"],
Woosuk Kwon's avatar
Woosuk Kwon committed
155
156
157
158
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
159
160
161
)
ext_modules.append(positional_encoding_extension)

162
# Layer normalization kernels.
163
layernorm_extension = CUDAExtension(
Woosuk Kwon's avatar
Woosuk Kwon committed
164
    name="vllm.layernorm_ops",
165
    sources=["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"],
Woosuk Kwon's avatar
Woosuk Kwon committed
166
167
168
169
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
170
171
172
)
ext_modules.append(layernorm_extension)

Woosuk Kwon's avatar
Woosuk Kwon committed
173
# Activation kernels.
174
activation_extension = CUDAExtension(
Woosuk Kwon's avatar
Woosuk Kwon committed
175
    name="vllm.activation_ops",
176
    sources=["csrc/activation.cpp", "csrc/activation_kernels.cu"],
Woosuk Kwon's avatar
Woosuk Kwon committed
177
178
179
180
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
Woosuk Kwon's avatar
Woosuk Kwon committed
181
182
183
)
ext_modules.append(activation_extension)

184
185
186
187
188
189
190
191
192
193
194
195
196
197
# Quantization kernels.
quantization_extension = CUDAExtension(
    name="vllm.quantization_ops",
    sources=[
        "csrc/quantization.cpp",
        "csrc/quantization/awq/gemm_kernels.cu",
    ],
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
)
ext_modules.append(quantization_extension)

198
199
200
201
202
203
204
205
206
207
208
# Misc. CUDA utils.
cuda_utils_extension = CUDAExtension(
    name="vllm.cuda_utils",
    sources=["csrc/cuda_utils.cpp", "csrc/cuda_utils_kernels.cu"],
    extra_compile_args={
        "cxx": CXX_FLAGS,
        "nvcc": NVCC_FLAGS,
    },
)
ext_modules.append(cuda_utils_extension)

209

210
211
212
213
214
215
216
217
218
219
def get_path(*filepath) -> str:
    return os.path.join(ROOT_DIR, *filepath)


def find_version(filepath: str):
    """Extract version information from the given filepath.

    Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
    """
    with open(filepath) as fp:
Woosuk Kwon's avatar
Woosuk Kwon committed
220
221
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  fp.read(), re.M)
222
223
224
225
226
227
228
229
230
231
        if version_match:
            return version_match.group(1)
        raise RuntimeError("Unable to find version string.")


def read_readme() -> str:
    """Read the README file."""
    return io.open(get_path("README.md"), "r", encoding="utf-8").read()


232
233
def get_requirements() -> List[str]:
    """Get Python package dependencies from requirements.txt."""
234
    with open(get_path("requirements.txt")) as f:
235
236
237
238
        requirements = f.read().strip().split("\n")
    return requirements


Woosuk Kwon's avatar
Woosuk Kwon committed
239
setuptools.setup(
Woosuk Kwon's avatar
Woosuk Kwon committed
240
241
242
    name="vllm",
    version=find_version(get_path("vllm", "__init__.py")),
    author="vLLM Team",
243
    license="Apache 2.0",
Woosuk Kwon's avatar
Woosuk Kwon committed
244
245
    description=("A high-throughput and memory-efficient inference and "
                 "serving engine for LLMs"),
246
247
    long_description=read_readme(),
    long_description_content_type="text/markdown",
248
    url="https://github.com/vllm-project/vllm",
249
    project_urls={
250
251
        "Homepage": "https://github.com/vllm-project/vllm",
        "Documentation": "https://vllm.readthedocs.io/en/latest/",
252
253
254
255
256
    },
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
Woosuk Kwon's avatar
Woosuk Kwon committed
257
        "Programming Language :: Python :: 3.11",
258
259
260
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
Woosuk Kwon's avatar
Woosuk Kwon committed
261
262
    packages=setuptools.find_packages(exclude=("benchmarks", "csrc", "docs",
                                               "examples", "tests")),
263
264
    python_requires=">=3.8",
    install_requires=get_requirements(),
Woosuk Kwon's avatar
Woosuk Kwon committed
265
    ext_modules=ext_modules,
266
    cmdclass={"build_ext": BuildExtension},
Woosuk Kwon's avatar
Woosuk Kwon committed
267
)