# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
import re
import ast
from pathlib import Path
from packaging.version import parse, Version

from setuptools import setup, find_packages
import subprocess

import urllib.request
import urllib.error
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))


def get_platform():
    """
    Returns the platform string.
    """
    if sys.platform.startswith('linux'):
        return 'linux_x86_64'
    elif sys.platform == 'darwin':
        return 'macosx_10_9_x86_64'
    elif sys.platform == 'win32':
        return 'win_amd64'
    else:
        raise ValueError('Unsupported platform: {}'.format(sys.platform))

from setuptools.command.install import install

# @pierce - TODO: Remove for proper release
BASE_WHEEL_URL = "https://github.com/piercefreeman/flash-attention/releases/download/{tag_name}/{wheel_name}"

class CustomInstallCommand(install):
    def run(self):
        raise_if_cuda_home_none("flash_attn")

        # Determine the version numbers that will be used to determine the correct wheel
        _, cuda_version = get_cuda_bare_metal_version(CUDA_HOME)
        torch_version = torch.__version__
        python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
        platform_name = get_platform()
        flash_version = get_package_version()

        # Determine wheel URL based on CUDA version, torch version, python version and OS
        wheel_filename = f'flash_attn-{flash_version}+cu{cuda_version}torch{torch_version}-{python_version}-{python_version}-{platform_name}.whl'
        wheel_url = BASE_WHEEL_URL.format(
            tag_name=f"v{flash_version}",
            wheel_name=wheel_filename
        )
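        # Illustrative example (not a real release asset): with flash_attn 1.0.0, CUDA 11.7,
        # torch 2.0.0 and CPython 3.10 on Linux, this resolves to a name like
        #   flash_attn-1.0.0+cu11.7torch2.0.0-cp310-cp310-linux_x86_64.whl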
        
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)
            os.system(f'pip install {wheel_filename}')
            os.remove(wheel_filename)
        except urllib.error.HTTPError:
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            install.run(self)


def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
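    # e.g. nvcc reports "... release 11.7, V11.7.64"; the token after "release" gives "11.7" (illustrative)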
    bare_metal_version = parse(output[release_idx].split(",")[0])

    return raw_output, bare_metal_version


def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_version = parse(torch.version.cuda)

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_version != torch_binary_version):
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )


def raise_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
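    # nvcc can compile the multiple -gencode targets in parallel via --threads, a flag
    # available starting with CUDA 11.2, so it is only added for new enough toolkits.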
    _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
    if bare_metal_version >= Version("11.2"):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args


if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n",
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
    )
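    # For example (illustrative): `export TORCH_CUDA_ARCH_LIST="8.0"` restricts the build
    # to compute capability 8.0 (A100-class GPUs).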
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        if bare_metal_version >= Version("11.8"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
        elif bare_metal_version >= Version("11.1"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        elif bare_metal_version == Version("11.0"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"


print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

cmdclass = {}
ext_modules = []

# Check if ATen/CUDAGeneratorImpl.h is found; otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]

raise_if_cuda_home_none("flash_attn")
# Require CUDA >= 11 and collect -gencode flags for the target compute capabilities
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
    raise RuntimeError("FlashAttention is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_90,code=sm_90")

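# Fetch the CUTLASS submodule; its headers are consumed via include_dirs below.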
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"])
ext_modules.append(
    CUDAExtension(
        name="flash_attn_cuda",
        sources=[
            "csrc/flash_attn/fmha_api.cpp",
            "csrc/flash_attn/src/fmha_fwd_hdim32.cu",
            "csrc/flash_attn/src/fmha_fwd_hdim64.cu",
            "csrc/flash_attn/src/fmha_fwd_hdim128.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim32.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim64.cu",
            "csrc/flash_attn/src/fmha_bwd_hdim128.cu",
            "csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
            "csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
        ],
        extra_compile_args={
            "cxx": ["-O3", "-std=c++17"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-O3",
                    "-std=c++17",
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "-U__CUDA_NO_HALF2_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                    "--ptxas-options=-v",
                    "-lineinfo"
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[
            Path(this_dir) / 'csrc' / 'flash_attn',
            Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
            Path(this_dir) / 'csrc' / 'flash_attn' / 'cutlass' / 'include',
        ],
    )
)

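# Illustrative example: with __version__ = "1.0.0" and FLASH_ATTN_LOCAL_VERSION=cu117,
# get_package_version() returns "1.0.0+cu117"; with the variable unset it returns "1.0.0".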
def get_package_version():
    with open(Path(this_dir) / "flash_attn" / "__init__.py", "r") as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    public_version = ast.literal_eval(version_match.group(1))
    local_version = os.environ.get("FLASH_ATTN_LOCAL_VERSION")
    if local_version:
        return f"{public_version}+{local_version}"
    else:
        return str(public_version)

setup(
    name="flash_attn",
    version=get_package_version(),
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
    ),
    author="Tri Dao",
    author_email="trid@stanford.edu",
    description="Flash Attention: Fast and Memory-Efficient Exact Attention",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/HazyResearch/flash-attention",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
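    # CustomInstallCommand tries a prebuilt wheel first and falls back to a source build;
    # BuildExtension is only needed when the CUDA extension is compiled here.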
    cmdclass={
        'install': CustomInstallCommand,
        "build_ext": BuildExtension
    } if ext_modules else {
        'install': CustomInstallCommand,
    },
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "einops",
        "packaging",
        "ninja",
    ],
)