# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
import re
import ast
from pathlib import Path
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess

import urllib.request
import urllib.error
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


# ninja build does not work unless include_dirs are absolute paths
this_dir = os.path.dirname(os.path.abspath(__file__))


def get_platform():
    """
    Returns the platform string.
    """
    if sys.platform.startswith('linux'):
        return 'linux_x86_64'
    elif sys.platform == 'darwin':
        return 'macosx_10_9_x86_64'
    elif sys.platform == 'win32':
        return 'win_amd64'
    else:
        raise ValueError('Unsupported platform: {}'.format(sys.platform))

from setuptools.command.install import install

# @pierce - TODO: Remove for proper release
BASE_WHEEL_URL = "https://github.com/piercefreeman/flash-attention/releases/download/{tag_name}/{wheel_name}"

class CustomInstallCommand(install):
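    """Install command that first tries to download a matching prebuilt wheel
    and falls back to building the CUDA extension from source."""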
    def run(self):
        # Determine the version numbers that will be used to determine the correct wheel
        _, cuda_version = get_cuda_bare_metal_version(CUDA_HOME)
        torch_version = torch.__version__
        python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
        platform_name = get_platform()
        flash_version = get_package_version()

        # Determine wheel URL based on CUDA version, torch version, python version and OS
        wheel_filename = f'flash_attn-{flash_version}+cu{cuda_version}torch{torch_version}-{python_version}-{python_version}-{platform_name}.whl'
        wheel_url = BASE_WHEEL_URL.format(
            tag_name=f"v{flash_version}",
            wheel_name=wheel_filename
        )
        
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)
            subprocess.run([sys.executable, "-m", "pip", "install", wheel_filename], check=True)
            os.remove(wheel_filename)
        except urllib.error.HTTPError:
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            install.run(self)


def get_cuda_bare_metal_version(cuda_dir):
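    """Run `nvcc -V` from cuda_dir and return (raw output, parsed CUDA release version)."""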
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    bare_metal_version = parse(output[release_idx].split(",")[0])
    return raw_output, bare_metal_version


def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
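    """Ensure the installed CUDA toolkit matches the CUDA version PyTorch was built with."""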
    raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_version = parse(torch.version.cuda)

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if bare_metal_version != torch_binary_version:
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )


def raise_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
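    """Enable parallel nvcc compilation via --threads, which is supported on CUDA 11.2 and newer."""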
    _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
    if bare_metal_version >= Version("11.2"):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args


if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n",
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
    )
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        if bare_metal_version >= Version("11.8"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
        elif bare_metal_version >= Version("11.1"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        elif bare_metal_version == Version("11.0"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"


print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

cmdclass = {}
ext_modules = []

# Check if ATen/CUDAGeneratorImpl.h is found; otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]

raise_if_cuda_home_none("flash_attn")
# Check that CUDA 11 or newer is installed (required for compute capability 8.0)
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
    raise RuntimeError("FlashAttention is only supported on CUDA 11 and above")
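# Always generate code for Turing (SM 7.5) and Ampere (SM 8.0);
# Hopper (SM 9.0) is added below when building with CUDA 11.8 or newer.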
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_90,code=sm_90")

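# Make sure the CUTLASS submodule is checked out before compiling the kernels.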
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"])
ext_modules.append(
    CUDAExtension(
        name="flash_attn_cuda",
        sources=[
            "csrc/flash_attn/fmha_api.cpp",
            "csrc/flash_attn/src/fmha_fwd_hdim32.cu",
            "csrc/flash_attn/src/fmha_fwd_hdim64.cu",
            "csrc/flash_attn/src/fmha_fwd_hdim128.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim32.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim64.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim128.cu",
            "csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
            "csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
        ],
        extra_compile_args={
            "cxx": ["-O3", "-std=c++17"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-O3",
                    "-std=c++17",
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "-U__CUDA_NO_HALF2_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                    "--ptxas-options=-v",
                    "-lineinfo"
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[
            Path(this_dir) / 'csrc' / 'flash_attn',
            Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
            Path(this_dir) / 'csrc' / 'flash_attn' / 'cutlass' / 'include',
        ],
    )
)

def get_package_version():
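    """Read __version__ from flash_attn/__init__.py, optionally appending
    FLASH_ATTN_LOCAL_VERSION as a local version tag."""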
    with open(Path(this_dir) / "flash_attn" / "__init__.py", "r") as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    public_version = ast.literal_eval(version_match.group(1))
    local_version = os.environ.get("FLASH_ATTN_LOCAL_VERSION")
    if local_version:
        return f"{public_version}+{local_version}"
    else:
        return str(public_version)

setup(
    name="flash_attn",
    version=get_package_version(),
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
    ),
    author="Tri Dao",
    author_email="trid@stanford.edu",
    description="Flash Attention: Fast and Memory-Efficient Exact Attention",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/HazyResearch/flash-attention",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
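    # Route `pip install` through CustomInstallCommand so a matching prebuilt wheel is
    # tried first; BuildExtension is only needed when the extension is built from source.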
    cmdclass={
        'install': CustomInstallCommand,
        "build_ext": BuildExtension
    } if ext_modules else {
        'install': CustomInstallCommand,
    },
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "einops",
        "packaging",
        "ninja",
    ],
)