import io
import os
import re
import subprocess
from typing import List, Set
import warnings

from packaging.version import parse, Version
import setuptools
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME

# Repository root, used to resolve README / requirements / version paths.
ROOT_DIR = os.path.dirname(__file__)

# Supported NVIDIA GPU architectures (compute capabilities).
SUPPORTED_ARCHS = {"7.0", "7.5", "8.0", "8.6", "8.9", "9.0"}

# Compiler flags.
CXX_FLAGS = ["-g", "-O2", "-std=c++17"]
# TODO(woosuk): Should we use -O3?
NVCC_FLAGS = ["-O2", "-std=c++17"]

# Match the C++ ABI of the installed PyTorch binary so the compiled
# extensions can link against it.
ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0
CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"]
# Fail fast: every extension below is a CUDAExtension, so a CUDA toolkit
# installation is mandatory. Abort with a clear message instead of letting
# nvcc invocations fail later in the build.
if CUDA_HOME is None:
    raise RuntimeError(
        "Cannot find CUDA_HOME. CUDA must be available to build the package.")
def get_nvcc_cuda_version(cuda_dir: str) -> Version:
    """Get the CUDA version from nvcc.

    Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
    """
    # `nvcc -V` prints e.g. "... release 11.8, V11.8.89"; the token right
    # after "release" carries the version, with a trailing comma to strip.
    tokens = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                     universal_newlines=True).split()
    version_token = tokens[tokens.index("release") + 1]
    return parse(version_token.split(",")[0])


def get_torch_arch_list() -> Set[str]:
    """Parse the TORCH_CUDA_ARCH_LIST env variable into a set of arch strings.

    TORCH_CUDA_ARCH_LIST can have one or more architectures,
    e.g. "8.0" or "7.5,8.0,8.6+PTX". Here, the "8.6+PTX" option asks the
    compiler to additionally include PTX code that can be runtime-compiled
    and executed on the 8.6 or newer architectures. While the PTX code will
    not give the best performance on the newer architectures, it provides
    forward compatibility.

    Returns:
        The set of requested, supported architectures; empty if the env
        variable is unset or empty.

    Raises:
        RuntimeError: If the env variable is set but contains no supported
            architecture at all.
    """
    env_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
    if env_arch_list is None:
        return set()

    # List entries are separated by ";" or space.
    torch_arch_list = set(env_arch_list.replace(" ", ";").split(";"))
    if not torch_arch_list:
        return set()

    # Filter out the invalid architectures and print a warning.
    valid_archs = SUPPORTED_ARCHS.union({s + "+PTX" for s in SUPPORTED_ARCHS})
    arch_list = torch_arch_list.intersection(valid_archs)
    # If none of the specified architectures are valid, raise an error.
    if not arch_list:
        raise RuntimeError(
            "None of the CUDA architectures in `TORCH_CUDA_ARCH_LIST` env "
            f"variable ({env_arch_list}) is supported. "
            f"Supported CUDA architectures are: {valid_archs}.")
    invalid_arch_list = torch_arch_list - valid_archs
    if invalid_arch_list:
        # Partially usable: keep the valid subset, but warn about the rest.
        warnings.warn(
            f"Unsupported CUDA architectures ({invalid_arch_list}) are "
            "excluded from the `TORCH_CUDA_ARCH_LIST` env variable "
            f"({env_arch_list}). Supported CUDA architectures are: "
            f"{valid_archs}.")
    return arch_list


# First, check the TORCH_CUDA_ARCH_LIST environment variable.
compute_capabilities = get_torch_arch_list()
if not compute_capabilities:
    # If TORCH_CUDA_ARCH_LIST is not defined or empty, target all available
    # GPUs on the current machine.
    device_count = torch.cuda.device_count()
    for i in range(device_count):
        major, minor = torch.cuda.get_device_capability(i)
        if major < 7:
            raise RuntimeError(
                "GPUs with compute capability below 7.0 are not supported.")
        compute_capabilities.add(f"{major}.{minor}")

nvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)
if not compute_capabilities:
    # If no GPU is specified nor available, add all supported architectures
    # based on the NVCC CUDA version.
    compute_capabilities = SUPPORTED_ARCHS.copy()
    if nvcc_cuda_version < Version("11.1"):
        compute_capabilities.remove("8.6")
    if nvcc_cuda_version < Version("11.8"):
        compute_capabilities.remove("8.9")
        compute_capabilities.remove("9.0")

# Validate the NVCC CUDA version.
if nvcc_cuda_version < Version("11.0"):
    raise RuntimeError("CUDA 11.0 or higher is required to build the package.")
if nvcc_cuda_version < Version("11.1"):
    if any(cc.startswith("8.6") for cc in compute_capabilities):
        raise RuntimeError(
            "CUDA 11.1 or higher is required for compute capability 8.6.")
if nvcc_cuda_version < Version("11.8"):
    if any(cc.startswith("8.9") for cc in compute_capabilities):
        # CUDA 11.8 is required to generate the code targeting compute capability 8.9.
        # However, GPUs with compute capability 8.9 can also run the code generated by
        # the previous versions of CUDA 11 and targeting compute capability 8.0.
        # Therefore, if CUDA 11.8 is not available, we target compute capability 8.0
        # instead of 8.9.
        warnings.warn(
            "CUDA 11.8 or higher is required for compute capability 8.9. "
            "Targeting compute capability 8.0 instead.")
        compute_capabilities = {cc for cc in compute_capabilities
                                if not cc.startswith("8.9")}
        compute_capabilities.add("8.0+PTX")
    if any(cc.startswith("9.0") for cc in compute_capabilities):
        raise RuntimeError(
            "CUDA 11.8 or higher is required for compute capability 9.0.")

# Add target compute capabilities to NVCC flags.
for capability in compute_capabilities:
    # "8.6" / "8.6+PTX" -> "86"; all SUPPORTED_ARCHS have single-digit
    # major versions, so positional indexing is safe here.
    num = capability[0] + capability[2]
    NVCC_FLAGS += ["-gencode", f"arch=compute_{num},code=sm_{num}"]
    if capability.endswith("+PTX"):
        # Also embed PTX so the binary is forward-compatible with newer GPUs.
        NVCC_FLAGS += ["-gencode", f"arch=compute_{num},code=compute_{num}"]

# Use NVCC threads to parallelize the build.
if nvcc_cuda_version >= Version("11.2"):
    num_threads = min(os.cpu_count(), 8)
    NVCC_FLAGS += ["--threads", str(num_threads)]

ext_modules = []

# (extension name, source files) pairs, in registration order. Every
# extension shares the same compiler flags, so the repeated CUDAExtension
# boilerplate is collapsed into a single data-driven loop.
_EXTENSION_SPECS = [
    # Cache operations.
    ("vllm.cache_ops",
     ["csrc/cache.cpp", "csrc/cache_kernels.cu"]),
    # Attention kernels.
    ("vllm.attention_ops",
     ["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"]),
    # Positional encoding kernels.
    ("vllm.pos_encoding_ops",
     ["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"]),
    # Layer normalization kernels.
    ("vllm.layernorm_ops",
     ["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"]),
    # Activation kernels.
    ("vllm.activation_ops",
     ["csrc/activation.cpp", "csrc/activation_kernels.cu"]),
    # Quantization kernels.
    ("vllm.quantization_ops", [
        "csrc/quantization.cpp",
        "csrc/quantization/awq/gemm_kernels.cu",
        "csrc/quantization/squeezellm/quant_cuda_kernel.cu",
    ]),
    # Misc. CUDA utils.
    ("vllm.cuda_utils",
     ["csrc/cuda_utils.cpp", "csrc/cuda_utils_kernels.cu"]),
]

for _ext_name, _ext_sources in _EXTENSION_SPECS:
    ext_modules.append(
        CUDAExtension(
            name=_ext_name,
            sources=_ext_sources,
            # Fresh dict per extension, mirroring the original per-extension
            # extra_compile_args literals.
            extra_compile_args={
                "cxx": CXX_FLAGS,
                "nvcc": NVCC_FLAGS,
            },
        ))

def get_path(*filepath) -> str:
    """Join *filepath* components onto the repository root directory."""
    joined = os.path.join(ROOT_DIR, *filepath)
    return joined


def find_version(filepath: str) -> str:
    """Extract the ``__version__`` string from the given file.

    Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py

    Raises:
        RuntimeError: If no ``__version__ = "..."`` assignment is found.
    """
    # Read with an explicit encoding so the build does not depend on the
    # machine's locale settings.
    with open(filepath, encoding="utf-8") as fp:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  fp.read(), re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")


def read_readme() -> str:
    """Read the README file."""
    # Use a context manager so the handle is closed deterministically
    # (``io.open`` is just an alias of the builtin ``open``).
    with open(get_path("README.md"), "r", encoding="utf-8") as f:
        return f.read()


def get_requirements() -> List[str]:
    """Get Python package dependencies from requirements.txt."""
    with open(get_path("requirements.txt")) as f:
        # One requirement specifier per line; strip the trailing newline so
        # the last entry is not empty.
        requirements = f.read().strip().split("\n")
    return requirements


# Metadata that requires touching the filesystem is computed up front so the
# setup() call itself stays declarative.
_version = find_version(get_path("vllm", "__init__.py"))
_long_description = read_readme()

setuptools.setup(
    name="vllm",
    version=_version,
    author="vLLM Team",
    license="Apache 2.0",
    description=("A high-throughput and memory-efficient inference and "
                 "serving engine for LLMs"),
    long_description=_long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/vllm-project/vllm",
    project_urls={
        "Homepage": "https://github.com/vllm-project/vllm",
        "Documentation": "https://vllm.readthedocs.io/en/latest/",
    },
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    # Ship only the Python package; native sources, docs and tests are
    # excluded from the wheel.
    packages=setuptools.find_packages(exclude=("benchmarks", "csrc", "docs",
                                               "examples", "tests")),
    python_requires=">=3.8",
    install_requires=get_requirements(),
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension},
    # py.typed marks the package as typed for downstream type checkers.
    package_data={"vllm": ["py.typed"]},
)