#!/usr/bin/env python
# coding=utf-8
'''
Description  : Build and packaging script for ktransformers.
Author       : chenxl
Date         : 2024-07-27 16:15:27
Version      : 1.0.0
LastEditors  : chenxl 
LastEditTime : 2024-08-14 16:36:19
Adapted from:
https://github.com/Dao-AILab/flash-attention/blob/v2.6.3/setup.py
Copyright (c) 2023, Tri Dao.
Copyright (c) 2024 by KVCache.AI, All Rights Reserved. 
'''

import os
import sys
import re
import ast
import subprocess
import platform
import shutil
import http.client
import urllib.request
import urllib.error
from pathlib import Path
from packaging.version import parse
import torch.version
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
from setuptools import setup, Extension
from cpufeature.extension import CPUFeature
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME

class CpuInstructInfo:
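    """Translate the CPU_INSTRUCT environment variable into llama.cpp CMake flags.

    Recognized values are NATIVE (the default, auto-detect), FANCY, AVX512,
    and AVX2. A hypothetical shell session forcing an AVX512 build instead
    of native detection:

        CPU_INSTRUCT=AVX512 pip install .
    """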
    CPU_INSTRUCT = os.getenv("CPU_INSTRUCT", "NATIVE")
    FANCY = "FANCY"
    AVX512 = "AVX512"
    AVX2 = "AVX2"
    CMAKE_NATIVE = "-DLLAMA_NATIVE=ON"
    CMAKE_FANCY = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON -DLLAMA_AVX512=ON -DLLAMA_AVX512_FANCY_SIMD=ON"
    CMAKE_AVX512 = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON -DLLAMA_AVX512=ON"
    CMAKE_AVX2 = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON"
    
class VersionInfo:
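    """Collects the version, platform, and CPU-feature tags used to name wheels.

    get_package_version(full_version=True) yields a local version along the
    lines of "0.1.4+cu121torch24fancy" (illustrative; the exact tags depend
    on the installed CUDA toolkit, torch build, and CPU).
    """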
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    PACKAGE_NAME = "ktransformers"
    BASE_WHEEL_URL: str = (
        "https://github.com/kvcache-ai/ktransformers/releases/download/{tag_name}/{wheel_filename}"
    )
    FORCE_BUILD = os.getenv("KTRANSFORMERS_FORCE_BUILD", "FALSE") == "TRUE"

    def get_cuda_bare_metal_version(self, cuda_dir):
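        """Return the CUDA toolkit version reported by `nvcc -V` as
        "<major><minor>", e.g. "release 12.1" becomes "121"."""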
        raw_output = subprocess.check_output(
            [os.path.join(cuda_dir, "bin", "nvcc"), "-V"], universal_newlines=True)
        output = raw_output.split()
        release_idx = output.index("release") + 1
        bare_metal_version = parse(output[release_idx].split(",")[0])
        cuda_version = f"{bare_metal_version.major}{bare_metal_version.minor}"
        return cuda_version

    def get_cuda_version_of_torch(self):
        torch_cuda_version = parse(torch.version.cuda)
        cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
        return cuda_version

    def get_platform(self):
        """
        Returns the platform name as used in wheel filenames.
        """
        if sys.platform.startswith("linux"):
            return f'linux_{platform.uname().machine}'
        elif sys.platform == "win32":
            return "win_amd64"
        else:
            raise ValueError("Unsupported platform: {}".format(sys.platform))

    def get_cpu_instruct(self):
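        """Return the CPU-feature tag: "fancy", "avx512", or "avx2".

        An explicit CPU_INSTRUCT setting wins; otherwise features are
        detected from /proc/cpuinfo on Linux or cpufeature on Windows.
        """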
        if CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.FANCY:
            return "fancy"
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX512:
            return "avx512"
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX2:
            return "avx2"
        else:
            print("CPU_INSTRUCT=NATIVE: auto-detecting CPU instruction set")
        if sys.platform.startswith("linux"):
            with open('/proc/cpuinfo', 'r', encoding="utf-8") as cpu_f:
                cpuinfo = cpu_f.read()
            flags_line = [line for line in cpuinfo.split('\n')
                          if line.startswith('flags')][0]
            flags = flags_line.split(':')[1].strip().split(' ')
            # "fancy" targets AVX512-VL/BW/DQ/VNNI; avx512bw serves as the
            # detection proxy for that feature set here
            for flag in flags:
                if 'avx512bw' in flag:
                    return 'fancy'
            for flag in flags:
                if 'avx512' in flag:
                    return 'avx512'
            for flag in flags:
                if 'avx2' in flag:
                    return 'avx2'
            raise ValueError(
                "Unsupported CPU instruction set: {}".format(flags_line))
        elif sys.platform == "win32":
            if CPUFeature.get("AVX512bw", False):
                return 'fancy'
            if CPUFeature.get("AVX512f", False):
                return 'avx512'
            if CPUFeature.get("AVX2", False):
                return 'avx2'
            raise ValueError(
                "Unsupported CPU instruction set: {}".format(str(CPUFeature)))
        else:
            raise ValueError("Unsupported platform: {}".format(sys.platform))

    def get_torch_version(self):
        torch_version_raw = parse(torch.__version__)
        torch_version = f"{torch_version_raw.major}{torch_version_raw.minor}"
        return torch_version
    
    def get_flash_version(self):
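        """Read __version__ from ktransformers/__init__.py.

        The "flash" naming is inherited from the flash-attention setup.py
        this script was adapted from.
        """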
        version_file = os.path.join(
            Path(VersionInfo.THIS_DIR), VersionInfo.PACKAGE_NAME, "__init__.py")
        with open(version_file, "r", encoding="utf-8") as f:
            version_match = re.search(
                r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
        if version_match is None:
            raise RuntimeError(
                f"Unable to find __version__ in {version_file}")
        flash_version = ast.literal_eval(version_match.group(1))
        return flash_version

    def get_package_version(self, full_version=False):
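        """Return the package version, appending the local tag that encodes
        the CUDA, torch, and CPU-feature variant when full_version=True or
        KTRANSFORMERS_FORCE_BUILD is set."""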
        flash_version = self.get_flash_version()
        package_version = f"{str(flash_version)}+cu{self.get_cuda_bare_metal_version(CUDA_HOME)}torch{self.get_torch_version()}{self.get_cpu_instruct()}"
        if full_version:
            return package_version
        if not VersionInfo.FORCE_BUILD:
            return str(flash_version)
        return package_version


class BuildWheelsCommand(_bdist_wheel):
    def get_wheel_name(self):
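        """Compose the expected wheel filename and its release-asset URL,
        e.g. (illustrative only)
        ktransformers-0.1.4+cu121torch24fancy-cp311-cp311-linux_x86_64.whl
        """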
        version_info = VersionInfo()
        package_version = version_info.get_package_version(full_version=True)
        flash_version = version_info.get_flash_version()
        python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
        wheel_filename = f"{VersionInfo.PACKAGE_NAME}-{package_version}-{python_version}-{python_version}-{version_info.get_platform()}.whl"
        wheel_url = VersionInfo.BASE_WHEEL_URL.format(tag_name=f"v{flash_version}", wheel_filename=wheel_filename)
        return wheel_filename, wheel_url


    def run(self):
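        """Try to download a matching precompiled wheel; fall back to building
        from source when KTRANSFORMERS_FORCE_BUILD is set or no wheel exists
        for this platform/CUDA/torch/CPU combination."""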
        if VersionInfo.FORCE_BUILD:
            super().run()
            return
        wheel_filename, wheel_url = self.get_wheel_name()
        print("Guessing wheel URL: ", wheel_url)
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)
            # Make the archive
            # Lifted from the root wheel processing command
            # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
            if not os.path.exists(self.dist_dir):
                os.makedirs(self.dist_dir)

            impl_tag, abi_tag, plat_tag = self.get_tag()
            archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"

            wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
            print("Raw wheel path", wheel_path)
            shutil.move(wheel_filename, wheel_path)
        except (urllib.error.HTTPError, urllib.error.URLError, http.client.RemoteDisconnected):
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            super().run()


# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
    "win32": "Win32",
    "win-amd64": "x64",
    "win-arm32": "ARM",
    "win-arm64": "ARM64",
}


class CMakeExtension(Extension):
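    """A sources-free Extension placeholder for the CMake-built cpuinfer_ext."""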
    def __init__(self, name: str, sourcedir: str = "") -> None:
        super().__init__(name, sources=[])
        self.sourcedir = os.fspath(
            Path(sourcedir).resolve() / "ktransformers" / "ktransformers_ext")


class CMakeBuild(BuildExtension):

    def build_extension(self, ext) -> None:
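        """Build CMakeExtension instances with CMake; any other extension
        (e.g. the KTransformersOps CUDAExtension) falls through to the
        regular torch BuildExtension path."""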
        if not isinstance(ext, CMakeExtension):
            super().build_extension(ext)
            return
        ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name)
        extdir = ext_fullpath.parent.resolve()

        # Using this requires trailing slash for auto-detection & inclusion of
        # auxiliary "native" libs

        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"

        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")

        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
        # from Python.
        cmake_args = [
            f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
            f"-DPYTHON_EXECUTABLE={sys.executable}",
            f"-DCMAKE_BUILD_TYPE={cfg}",  # not used on MSVC, but no harm
        ]
        build_args = []
        if "CMAKE_ARGS" in os.environ:
            cmake_args += [
                item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
            
        if CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.FANCY:
            cpu_args = CpuInstructInfo.CMAKE_FANCY
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX512:
            cpu_args = CpuInstructInfo.CMAKE_AVX512
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX2:
            cpu_args = CpuInstructInfo.CMAKE_AVX2
        else:
            cpu_args = CpuInstructInfo.CMAKE_NATIVE
        
        cmake_args += [
            item for item in cpu_args.split(" ") if item
        ]
        # Pass the package version through to the C++ build.
        cmake_args += [
            f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"]
        if self.compiler.compiler_type != "msvc":
            if not cmake_generator or cmake_generator == "Ninja":
                try:
                    import ninja

                    ninja_executable_path = Path(ninja.BIN_DIR) / "ninja"
                    cmake_args += [
                        "-GNinja",
                        f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
                    ]
                except ImportError:
                    pass

        else:
            # Single config generators are handled "normally"
            single_config = any(
                x in cmake_generator for x in {"NMake", "Ninja"})

            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]

            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
                ]
                build_args += ["--config", cfg]

        if sys.platform.startswith("darwin"):
            # Cross-compile support for macOS - respect ARCHFLAGS if set
            archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
            if archs:
                cmake_args += [
                    "-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]

        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            if hasattr(self, "parallel") and self.parallel:
                build_args += [f"-j{self.parallel}"]

        build_temp = Path(ext.sourcedir) / "build"
        if not build_temp.exists():
            build_temp.mkdir(parents=True)
        subprocess.run(
            ["cmake", ext.sourcedir, *cmake_args], cwd=build_temp, check=True
        )
        subprocess.run(
            ["cmake", "--build", ".", *build_args], cwd=build_temp, check=True
        )


setup(
    version=VersionInfo().get_package_version(),
    cmdclass={"bdist_wheel": BuildWheelsCommand, "build_ext": CMakeBuild},
    ext_modules=[
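        # cpuinfer_ext is compiled by CMake via CMakeBuild; KTransformersOps
        # is a regular torch CUDAExtension compiled with nvcc.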
        CMakeExtension("cpuinfer_ext"),
        CUDAExtension('KTransformersOps', [
            'ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu',
            'ktransformers/ktransformers_ext/cuda/binding.cpp',
            'ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu'
        ],
        extra_compile_args={
                'cxx': ['-O3'],
                'nvcc': [
                    '-O3',
                    '--use_fast_math',
                    '-Xcompiler', '-fPIC',
                ]
            }
        )
    ]
)