# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from pathlib import Path

from setuptools import setup, find_packages
import subprocess

import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))


def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    # The release token looks like "11.3,"; strip the trailing comma so that
    # multi-digit minor versions (e.g. "11.10") are parsed correctly.
    release = output[release_idx].rstrip(",").split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1]

    return raw_output, bare_metal_major, bare_metal_minor

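# Illustrative example (values are hypothetical): an `nvcc -V` banner containing
# "release 11.3, V11.3.109" makes get_cuda_bare_metal_version return
# (raw_output, "11", "3").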

def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_major = torch.version.cuda.split(".")[0]
    torch_binary_minor = torch.version.cuda.split(".")[1]

    print("\nCompiling CUDA extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
        raise RuntimeError(
            "CUDA extensions are being compiled with a version of CUDA that does "
            "not match the version used to compile the PyTorch binaries. "
            "PyTorch binaries were compiled with CUDA {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors: "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
            "You can try commenting out this check (at your own risk)."
        )


def raise_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    raise RuntimeError(
        f"{global_option} was requested, but nvcc was not found.  Are you sure your environment has nvcc available?  "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
    _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    # nvcc gained the --threads flag in CUDA 11.2; compare (major, minor) as a
    # tuple so that, e.g., CUDA 12.0 also qualifies.
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args

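# For example, append_nvcc_threads(["-O3"]) yields ["-O3", "--threads", "4"]
# when building with CUDA 11.2 or newer, and ["-O3"] unchanged otherwise.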

if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n"
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, we cross-compile for Volta (compute capability 7.0), "
        "Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
    )
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
        if int(bare_metal_major) == 11:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0"
            if int(bare_metal_minor) > 0:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0;8.6"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5"
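# Illustrative shell override (run before this script, not part of it): setting
#   TORCH_CUDA_ARCH_LIST="8.0"
# in the environment would cross-compile for Ampere only.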

print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

cmdclass = {}
ext_modules = []

# Check if ATen/CUDAGeneratorImpl.h is found; otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]
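# (PyTorch moved this header from ATen/ to ATen/cuda/ in the PR above; the
# define lets the C++/CUDA sources pick the matching include path.)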

raise_if_cuda_home_none("flash_attn")
# Check that CUDA 11 is installed, since compute capability 8.0 requires it.
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) < 11:
    raise RuntimeError("FlashAttention is only supported on CUDA 11 and above")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
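# Each -gencode pair makes nvcc emit device code for one more architecture, so
# the resulting fat binary covers both Turing (sm_75) and Ampere (sm_80).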

# Fetch the CUTLASS submodule that the kernels depend on; check=True makes the
# build fail loudly here if the fetch does not succeed.
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"], check=True)
ext_modules.append(
    CUDAExtension(
        name="flash_attn_cuda",
        sources=[
            "csrc/flash_attn/fmha_api.cpp",
            "csrc/flash_attn/src/fmha_fprop_fp16_kernel.sm80.cu",
            "csrc/flash_attn/src/fmha_dgrad_fp16_kernel_loop.sm80.cu",
            "csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
            "csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
        ],
        extra_compile_args={
            "cxx": ["-O3", "-std=c++17"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-O3",
                    "-std=c++17",
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                    "--ptxas-options=-v",
                    "-lineinfo"
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[
            Path(this_dir) / "csrc" / "flash_attn",
            Path(this_dir) / "csrc" / "flash_attn" / "src",
            Path(this_dir) / "csrc" / "flash_attn" / "cutlass" / "include",
        ],
    )
)
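# Note: the extension builds a top-level module named flash_attn_cuda, which
# the Python wrappers in the flash_attn package import at runtime.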

setup(
    name="flash_attn",
    version="0.2",
    packages=find_packages(
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
    ),
    author="Tri Dao",
    author_email="trid@stanford.edu",
    description="Flash Attention: Fast and Memory-Efficient Exact Attention",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/HazyResearch/flash-attention",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "einops",
    ],
)
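
# Illustrative installation commands (standard pip/setuptools usage):
#   pip install .       # build the CUDA extension and install
#   pip install -e .    # editable install for development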