#!/usr/bin/env python
# coding=utf-8
'''
Description  :
Author       : chenxl
Date         : 2024-07-27 16:15:27
Version      : 1.0.0
LastEditors  : chenxl
LastEditTime : 2024-08-14 16:36:19
Adapted from:
https://github.com/Dao-AILab/flash-attention/blob/v2.6.3/setup.py
Copyright (c) 2023, Tri Dao.
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''

import os
import sys
import re
import ast
import subprocess
import platform
import shutil
import http.client
import urllib.request
import urllib.error
from pathlib import Path
from packaging.version import parse
import torch.version
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
from setuptools import setup, Extension
from cpufeature.extension import CPUFeature
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
try:
    from torch_musa.utils.simple_porting import SimplePorting
    from torch_musa.utils.musa_extension import BuildExtension, MUSAExtension, MUSA_HOME
except ImportError:
    MUSA_HOME = None

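# CPU_INSTRUCT selects the SIMD feature level baked into the CPU kernels.
# Recognized values: NATIVE (default), FANCY, AVX512, AVX2; each maps to the
# corresponding -DLLAMA_* CMake flag set below.
# Assumed invocation example: CPU_INSTRUCT=FANCY pip install .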
class CpuInstructInfo:
    CPU_INSTRUCT = os.getenv("CPU_INSTRUCT", "NATIVE")
    FANCY = "FANCY"
    AVX512 = "AVX512"
    AVX2 = "AVX2"
    CMAKE_NATIVE = "-DLLAMA_NATIVE=ON"
    CMAKE_FANCY = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON -DLLAMA_AVX512=ON -DLLAMA_AVX512_FANCY_SIMD=ON"
    CMAKE_AVX512 = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON -DLLAMA_AVX512=ON"
    CMAKE_AVX2 = "-DLLAMA_NATIVE=OFF -DLLAMA_FMA=ON -DLLAMA_F16C=ON -DLLAMA_AVX=ON -DLLAMA_AVX2=ON"

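# VersionInfo composes the full wheel version tag (base version plus backend,
# torch, and CPU-instruction suffixes), used both to name release wheels and
# to guess the prebuilt-wheel download URL on GitHub.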
class VersionInfo:
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    PACKAGE_NAME = "ktransformers"
    BASE_WHEEL_URL: str = (
        "https://github.com/kvcache-ai/ktransformers/releases/download/{tag_name}/{wheel_filename}"
    )
    FORCE_BUILD = os.getenv("KTRANSFORMERS_FORCE_BUILD", "FALSE") == "TRUE"

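    # Toolchain probes: parse the version banner printed by mcc / nvcc to
    # produce the "mu<major><minor>" / "cu<major><minor>" wheel tag component.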
    def get_musa_bare_metal_version(self, musa_dir):
        raw_output = subprocess.run(
            [musa_dir + "/bin/mcc", "-v"], check=True,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode("utf-8")
        output = raw_output.split()
        release_idx = output.index("version") + 1
        bare_metal_version = parse(output[release_idx].split(",")[0])
        musa_version = f"{bare_metal_version.major}{bare_metal_version.minor}"
        return musa_version

    def get_cuda_bare_metal_version(self, cuda_dir):
        raw_output = subprocess.check_output(
            [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
        output = raw_output.split()
        release_idx = output.index("release") + 1
        bare_metal_version = parse(output[release_idx].split(",")[0])
        cuda_version = f"{bare_metal_version.major}{bare_metal_version.minor}"
        return cuda_version

    def get_cuda_version_of_torch(self):
        torch_cuda_version = parse(torch.version.cuda)
        cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
        return cuda_version

    def get_platform(self):
        """
        Returns the platform name as used in wheel filenames.
        """
        if sys.platform.startswith("linux"):
            return f'linux_{platform.uname().machine}'
        elif sys.platform == "win32":
            return "win_amd64"
        else:
            raise ValueError("Unsupported platform: {}".format(sys.platform))

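    # Probes CPU features from most to least capable: "fancy" (AVX512
    # BW/DQ/VL/VNNI, detected via the avx512bw flag), then plain AVX512, then
    # AVX2. An explicit CPU_INSTRUCT setting skips the probe.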
    def get_cpu_instruct(self):
        if CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.FANCY:
            return "fancy"
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX512:
            return "avx512"
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX2:
            return "avx2"
        else:
            print("Using native cpu instruct")
        if sys.platform.startswith("linux"):
            with open('/proc/cpuinfo', 'r', encoding="utf-8") as cpu_f:
                cpuinfo = cpu_f.read()
            flags_line = [line for line in cpuinfo.split('\n')
                          if line.startswith('flags')][0]
            flags = flags_line.split(':')[1].strip().split(' ')
            # "fancy" targets AVX512-VL, AVX512-BW, AVX512-DQ and AVX512-VNNI;
            # avx512bw is checked here as a proxy for that feature set.
            for flag in flags:
                if 'avx512bw' in flag:
                    return 'fancy'
            for flag in flags:
                if 'avx512' in flag:
                    return 'avx512'
            for flag in flags:
                if 'avx2' in flag:
                    return 'avx2'
            raise ValueError(
                "Unsupported CPU instructions: {}".format(flags_line))
        elif sys.platform == "win32":
            if CPUFeature.get("AVX512bw", False):
                return 'fancy'
            if CPUFeature.get("AVX512f", False):
                return 'avx512'
            if CPUFeature.get("AVX2", False):
                return 'avx2'
            raise ValueError(
                "Unsupported CPU instructions: {}".format(str(CPUFeature)))
        else:
            raise ValueError("Unsupported platform: {}".format(sys.platform))

    def get_torch_version(self):
        torch_version_raw = parse(torch.__version__)
        torch_version = f"{torch_version_raw.major}{torch_version_raw.minor}"
        return torch_version

    def get_flash_version(self):
        version_file = os.path.join(
            Path(VersionInfo.THIS_DIR), VersionInfo.PACKAGE_NAME, "__init__.py")
        with open(version_file, "r", encoding="utf-8") as f:
            version_match = re.search(
                r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
        if version_match is None:
            raise RuntimeError(f"Unable to find __version__ in {version_file}")
        flash_version = ast.literal_eval(version_match.group(1))
        return flash_version

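    # The local version label follows PEP 440, e.g. "0.1.0+cu121torch24fancy"
    # (illustrative values; the actual tag comes from the format string below).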
    def get_package_version(self, full_version=False):
        flash_version = str(self.get_flash_version())
        torch_version = self.get_torch_version()
        cpu_instruct = self.get_cpu_instruct()
        backend_version = ""
        if CUDA_HOME is not None:
            backend_version = f"cu{self.get_cuda_bare_metal_version(CUDA_HOME)}"
        elif MUSA_HOME is not None:
            backend_version = f"mu{self.get_musa_bare_metal_version(MUSA_HOME)}"
        else:
            raise ValueError("Unsupported backend: CUDA_HOME and MUSA_HOME are not set.")
        package_version = f"{flash_version}+{backend_version}torch{torch_version}{cpu_instruct}"
        if full_version:
            return package_version
        if not VersionInfo.FORCE_BUILD:
            return flash_version
        return package_version


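# bdist_wheel override: try to download a prebuilt wheel matching this exact
# version tag from the corresponding GitHub release, falling back to a source
# build if the download fails (or when KTRANSFORMERS_FORCE_BUILD=TRUE).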
class BuildWheelsCommand(_bdist_wheel):
    def get_wheel_name(self):
        version_info = VersionInfo()
        package_version = version_info.get_package_version(full_version=True)
        flash_version = version_info.get_flash_version()
        python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
        wheel_filename = f"{VersionInfo.PACKAGE_NAME}-{package_version}-{python_version}-{python_version}-{version_info.get_platform()}.whl"
        wheel_url = VersionInfo.BASE_WHEEL_URL.format(tag_name=f"v{flash_version}", wheel_filename=wheel_filename)
        return wheel_filename, wheel_url


    def run(self):
        if VersionInfo.FORCE_BUILD:
            super().run()
            return
        wheel_filename, wheel_url = self.get_wheel_name()
        print("Guessing wheel URL: ", wheel_url)
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)
            # Make the archive
            # Lifted from the root wheel processing command
            # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
            if not os.path.exists(self.dist_dir):
                os.makedirs(self.dist_dir)

            impl_tag, abi_tag, plat_tag = self.get_tag()
            archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"

            wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
            print("Raw wheel path", wheel_path)
            shutil.move(wheel_filename, wheel_path)
        except (urllib.error.HTTPError, urllib.error.URLError, http.client.RemoteDisconnected):
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            super().run()


# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
    "win32": "Win32",
    "win-amd64": "x64",
    "win-arm32": "ARM",
    "win-arm64": "ARM64",
}


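# A sources=[] placeholder extension: setuptools sees an ordinary Extension,
# while CMakeBuild below performs the actual configure/compile of
# ktransformers/ktransformers_ext via CMake.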
class CMakeExtension(Extension):
    def __init__(self, name: str, sourcedir: str = "") -> None:
        super().__init__(name, sources=[])
        self.sourcedir = os.fspath(
            Path(sourcedir).resolve() / "ktransformers" / "ktransformers_ext")


class CMakeBuild(BuildExtension):

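    # Dispatch: CMakeExtension instances are configured and built with CMake;
    # any other extension (e.g. the CUDA/MUSA ops module) falls through to the
    # inherited torch BuildExtension behavior.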
    def build_extension(self, ext) -> None:
        if not isinstance(ext, CMakeExtension):
            super().build_extension(ext)
            return
        ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name)
        extdir = ext_fullpath.parent.resolve()

        # Using this requires trailing slash for auto-detection & inclusion of
        # auxiliary "native" libs

        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"

        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")

        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
        # from Python.
        cmake_args = [
            f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
            f"-DPYTHON_EXECUTABLE={sys.executable}",
            f"-DCMAKE_BUILD_TYPE={cfg}",  # not used on MSVC, but no harm
        ]

        if CUDA_HOME is not None:
            cmake_args += ["-DKTRANSFORMERS_USE_CUDA=ON"]
        elif MUSA_HOME is not None:
            cmake_args += ["-DKTRANSFORMERS_USE_MUSA=ON"]
        else:
            raise ValueError("Unsupported backend: CUDA_HOME and MUSA_HOME are not set.")

        build_args = []
        if "CMAKE_ARGS" in os.environ:
            cmake_args += [
                item for item in os.environ["CMAKE_ARGS"].split(" ") if item]

        if CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.FANCY:
            cpu_args = CpuInstructInfo.CMAKE_FANCY
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX512:
            cpu_args = CpuInstructInfo.CMAKE_AVX512
        elif CpuInstructInfo.CPU_INSTRUCT == CpuInstructInfo.AVX2:
            cpu_args = CpuInstructInfo.CMAKE_AVX2
        else:
            cpu_args = CpuInstructInfo.CMAKE_NATIVE

        cmake_args += [
            item for item in cpu_args.split(" ") if item
        ]
        # Pass the package version through to the C++ code; not every build needs it.
        cmake_args += [
            f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"]
        if self.compiler.compiler_type != "msvc":
            if not cmake_generator or cmake_generator == "Ninja":
                try:
                    import ninja

                    ninja_executable_path = Path(ninja.BIN_DIR) / "ninja"
                    cmake_args += [
                        "-GNinja",
                        f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
                    ]
                except ImportError:
                    pass

        else:
            # Single config generators are handled "normally"
            single_config = any(
                x in cmake_generator for x in {"NMake", "Ninja"})

            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]

            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
                ]
                build_args += ["--config", cfg]

        if sys.platform.startswith("darwin"):
            # Cross-compile support for macOS - respect ARCHFLAGS if set
            archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
            if archs:
                cmake_args += [
                    "-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]

        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            if hasattr(self, "parallel") and self.parallel:
                build_args += [f"-j{self.parallel}"]
        print("CMake args:", cmake_args)
        build_temp = Path(ext.sourcedir) / "build"
        if not build_temp.exists():
            build_temp.mkdir(parents=True)
        # text=True so the captured output below prints as str rather than bytes
        result = subprocess.run(
            ["cmake", ext.sourcedir, *cmake_args],
            cwd=build_temp, check=True, capture_output=True, text=True
        )
        print("Standard output:", result.stdout)
        print("Standard error:", result.stderr)
        subprocess.run(
            ["cmake", "--build", ".", "--verbose", *build_args], cwd=build_temp, check=True
325
        )

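# Backend selection for the KTransformersOps extension. For MUSA builds,
# SimplePorting first rewrites the CUDA sources into a cuda_musa/ directory
# using the textual mapping rules below; MUSAExtension then compiles the
# ported sources.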
if CUDA_HOME is not None:
    ops_module = CUDAExtension('KTransformersOps', [
        'ktransformers/ktransformers_ext/cuda/custom_gguf/dequant.cu',
        'ktransformers/ktransformers_ext/cuda/binding.cpp',
        'ktransformers/ktransformers_ext/cuda/gptq_marlin/gptq_marlin.cu'
    ],
    extra_compile_args={
            'cxx': ['-O3', '-DKTRANSFORMERS_USE_CUDA'],
            'nvcc': [
                '-O3',
                '--use_fast_math',
                '-Xcompiler', '-fPIC',
                '-DKTRANSFORMERS_USE_CUDA',
            ]
        }
    )
elif MUSA_HOME is not None:
    SimplePorting(cuda_dir_path="ktransformers/ktransformers_ext/cuda", mapping_rule={
        # Common rules
        "at::cuda": "at::musa",
        "#include <ATen/cuda/CUDAContext.h>": "#include \"torch_musa/csrc/aten/musa/MUSAContext.h\"",
        "#include <c10/cuda/CUDAGuard.h>": "#include \"torch_musa/csrc/core/MUSAGuard.h\"",
        }).run()
    ops_module = MUSAExtension('KTransformersOps', [
        'ktransformers/ktransformers_ext/cuda_musa/custom_gguf/dequant.mu',
        'ktransformers/ktransformers_ext/cuda_musa/binding.cpp',
        # TODO: Add Marlin support for MUSA.
        # 'ktransformers/ktransformers_ext/cuda_musa/gptq_marlin/gptq_marlin.mu'
    ],
    extra_compile_args={
            'cxx': ['force_mcc'],
            'mcc': [
                '-O3',
                '-DKTRANSFORMERS_USE_MUSA',
                '-DTHRUST_IGNORE_CUB_VERSION_CHECK',
            ]
        }
    )
else:
    raise ValueError("Unsupported backend: CUDA_HOME and MUSA_HOME are not set.")

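# Remaining package metadata (name, packages, dependencies) is presumably
# supplied by pyproject.toml/setup.cfg; setup() here only wires in the dynamic
# version, the wheel-download command, and the native extension builds.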
setup(
    version=VersionInfo().get_package_version(),
    cmdclass={"bdist_wheel":BuildWheelsCommand ,"build_ext": CMakeBuild},
    ext_modules=[
        CMakeExtension("cpuinfer_ext"),
        ops_module,
    ]
)