# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
import re
import ast
from pathlib import Path
from packaging.version import parse, Version

from setuptools import setup, find_packages
import subprocess

import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))


def get_cuda_bare_metal_version(cuda_dir):
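    """Return the raw `nvcc -V` output and the parsed CUDA toolkit version (e.g. 11.8)."""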
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    bare_metal_version = parse(output[release_idx].split(",")[0])

    return raw_output, bare_metal_version


def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
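    """Raise if the local nvcc toolkit version differs from the CUDA version torch was built with."""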
    raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_version = parse(torch.version.cuda)

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if bare_metal_version != torch_binary_version:
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )


def raise_if_cuda_home_none(global_option: str) -> None:
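    """Raise a clear error when `global_option` needs nvcc but CUDA_HOME cannot be found."""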
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
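    """Add `--threads 4` so nvcc (supported from CUDA 11.2) compiles with multiple threads."""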
    _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
    if bare_metal_version >= Version("11.2"):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args


if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n",
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
    )
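    # No GPU is visible at build time, so fall back to a default arch list keyed off the
    # local CUDA toolkit below. Setting TORCH_CUDA_ARCH_LIST explicitly (for example
    # TORCH_CUDA_ARCH_LIST="8.0" python setup.py install) bypasses this fallback.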
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        if bare_metal_version >= Version("11.8"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
        elif bare_metal_version >= Version("11.1"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        elif bare_metal_version == Version("11.0"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"


print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

cmdclass = {}
ext_modules = []

# Check if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]

raise_if_cuda_home_none("flash_attn")
# Check that CUDA 11.0 or newer is installed (required for compute capability 8.0)
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
    raise RuntimeError("FlashAttention is only supported on CUDA 11 and above")
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_90,code=sm_90")

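# CUTLASS is vendored as a git submodule; fetch it so its headers (added to include_dirs
# below) are available when the extension is compiled.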
subprocess.run(["git", "submodule", "update", "--init", "csrc/cutlass"])
ext_modules.append(
    CUDAExtension(
        name="flash_attn_2_cuda",
        sources=[
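            # One translation unit per (head dim, dtype, fwd/bwd) combination, so nvcc can
            # build the kernels in parallel and each compile stays relatively small.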
            "csrc/flash_attn/flash_api.cpp",
            "csrc/flash_attn/src/flash_fwd_hdim32_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim32_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim64_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim64_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim96_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim96_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim128_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim128_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim160_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim160_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim192_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim192_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim224_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim224_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim256_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_fwd_hdim256_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim32_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim32_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim64_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim64_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim96_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim96_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim128_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim128_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim160_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim160_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim192_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim192_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim224_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim224_bf16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim256_fp16_sm80.cu",
            "csrc/flash_attn/src/flash_bwd_hdim256_bf16_sm80.cu",
        ],
        extra_compile_args={
            "cxx": ["-O3", "-std=c++17"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-O3",
                    "-std=c++17",
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "-U__CUDA_NO_HALF2_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                    "--ptxas-options=-v",
                    # "--ptxas-options=-O2",
                    "-lineinfo"
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[
            Path(this_dir) / 'csrc' / 'flash_attn',
            Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
            Path(this_dir) / 'csrc' / 'cutlass' / 'include',
        ],
    )
)


def get_package_version():
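    """Read __version__ from flash_attn/__init__.py.

    If FLASH_ATTN_LOCAL_VERSION is set in the environment, it is appended as a PEP 440
    local version label (for example, a hypothetical 2.0.0+mybuild).
    """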
    with open(Path(this_dir) / "flash_attn" / "__init__.py", "r") as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    public_version = ast.literal_eval(version_match.group(1))
    local_version = os.environ.get("FLASH_ATTN_LOCAL_VERSION")
    if local_version:
        return f"{public_version}+{local_version}"
    else:
        return str(public_version)


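# Illustrative build commands (assuming nvcc for CUDA >= 11 is on PATH):
#   pip install . --no-build-isolation
#   python setup.py bdist_wheel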
setup(
    name="flash_attn",
    version=get_package_version(),
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
    ),
    author="Tri Dao",
    author_email="trid@cs.stanford.edu",
    description="Flash Attention: Fast and Memory-Efficient Exact Attention",
    url="https://github.com/Dao-AILab/flash-attention",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "einops",
        "packaging",
        "ninja",
    ],
)