cmake_minimum_required(VERSION 3.26 FATAL_ERROR)
project(sgl-kernel LANGUAGES CXX CUDA)
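
# Typical standalone configure/build (illustrative; wheel builds normally
# drive this file through scikit-build-core):
#   cmake -S . -B build
#   cmake --build build -j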

# utils
include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)
include(FetchContent)

# CMake policies
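# CMP0169=OLD keeps the single-argument FetchContent_Populate(<dep>) calls
# below working; CMP0177=NEW opts in to normalized install() DESTINATION paths.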
cmake_policy(SET CMP0169 OLD)
cmake_policy(SET CMP0177 NEW)
set(CMAKE_COLOR_DIAGNOSTICS ON)
set(CMAKE_VERBOSE_MAKEFILE ON CACHE BOOL "ON")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_SHARED_LIBRARY_PREFIX "")

# Python
find_package(Python COMPONENTS Interpreter Development.Module ${SKBUILD_SABI_COMPONENT} REQUIRED)

# CXX
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")

# CUDA
enable_language(CUDA)
find_package(CUDAToolkit REQUIRED)
set_property(GLOBAL PROPERTY CUDA_SEPARABLE_COMPILATION ON)

message(STATUS "Detected CUDA_VERSION=${CUDA_VERSION}")
if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "13.0")
    message("CUDA_VERSION ${CUDA_VERSION} >= 13.0")
elseif ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8")
    message("CUDA_VERSION ${CUDA_VERSION} >= 12.8")
elseif ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.4")
    message("CUDA_VERSION ${CUDA_VERSION} >= 12.4")
elseif ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.1")
    message("CUDA_VERSION ${CUDA_VERSION} >= 12.1")
elseif ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "11.8")
    message("CUDA_VERSION ${CUDA_VERSION} >= 11.8")
endif()

# Torch
find_package(Torch REQUIRED)
clear_cuda_arches(CMAKE_FLAG)

# Third-party repos
# cutlass
FetchContent_Declare(
    repo-cutlass
    GIT_REPOSITORY https://github.com/NVIDIA/cutlass
    GIT_TAG        57e3cfb47a2d9e0d46eb6335c3dc411498efa198
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-cutlass)

# DeepGEMM
FetchContent_Declare(
    repo-deepgemm
    GIT_REPOSITORY https://github.com/sgl-project/DeepGEMM
    GIT_TAG        f4adba8a6695e635b0106ce3dae3202016ad0ee5
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-deepgemm)

# fmt
FetchContent_Declare(
    repo-fmt
    GIT_REPOSITORY https://github.com/fmtlib/fmt
    GIT_TAG        553ec11ec06fbe0beebfbb45f9dc3c9eabd83d28
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-fmt)

# Triton kernel
FetchContent_Declare(
    repo-triton
    GIT_REPOSITORY "https://github.com/triton-lang/triton"
    GIT_TAG        8f9f695ea8fde23a0c7c88e4ab256634ca27789f
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-triton)

# flashinfer
FetchContent_Declare(
    repo-flashinfer
    GIT_REPOSITORY https://github.com/flashinfer-ai/flashinfer.git
    GIT_TAG        bc29697ba20b7e6bdb728ded98f04788e16ee021
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-flashinfer)

# flash-attention
FetchContent_Declare(
    repo-flash-attention
    GIT_REPOSITORY https://github.com/sgl-project/sgl-attn
    GIT_TAG        f20a52329482ddca4a627b2f028f88c2959ee299
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-flash-attention)

# flash-attention origin
FetchContent_Declare(
    repo-flash-attention-origin
    GIT_REPOSITORY https://github.com/Dao-AILab/flash-attention.git
    GIT_TAG        9dbed03d1a7a5862998c182c83d8265fea9dc21b
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-flash-attention-origin)

# mscclpp
FetchContent_Declare(
    repo-mscclpp
    GIT_REPOSITORY https://github.com/microsoft/mscclpp.git
    GIT_TAG        51eca89d20f0cfb3764ccd764338d7b22cd486a6
    GIT_SHALLOW    OFF
)
FetchContent_Populate(repo-mscclpp)

# fast-hadamard-transform
FetchContent_Declare(
    repo-fast-hadamard-transform
    GIT_REPOSITORY https://github.com/sgl-project/fast-hadamard-transform.git
    GIT_TAG 48f3c13764dc2ec662ade842a4696a90a137f1bc
    GIT_SHALLOW OFF
)
FetchContent_Populate(repo-fast-hadamard-transform)

# ccache option
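# Note: ccache is engaged only when the binary is found AND the CCACHE_DIR
# environment variable is set, e.g. (illustrative path):
#   CCACHE_DIR=$HOME/.cache/ccache cmake -S . -B build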
option(ENABLE_CCACHE "Whether to use ccache" ON)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND AND ENABLE_CCACHE AND DEFINED ENV{CCACHE_DIR})
    message(STATUS "Building with CCACHE enabled")
    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "ccache")
    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "ccache")
endif()

# Enable gencode below SM90
option(ENABLE_BELOW_SM90 "Enable below SM90" ON)

if (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
    set(ENABLE_BELOW_SM90 OFF)
    message(STATUS "For aarch64, disable gencode below SM90 by default")
endif()
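
# ENABLE_BELOW_SM90=ON adds the sm_80/sm_89 (and sm_87 on aarch64) gencode
# targets appended further down; turning it OFF shortens builds on
# Hopper-only deployments, e.g.:
#   cmake -DENABLE_BELOW_SM90=OFF ...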

include_directories(
    ${PROJECT_SOURCE_DIR}/include
    ${PROJECT_SOURCE_DIR}/csrc
)

set(SGL_KERNEL_CUDA_FLAGS
    "-DNDEBUG"
    "-DOPERATOR_NAMESPACE=sgl-kernel"
    "-O3"
    "-Xcompiler"
    "-fPIC"
    "-gencode=arch=compute_90,code=sm_90"
    "-std=c++17"
    "-DFLASHINFER_ENABLE_F16"
    "-DCUTE_USE_PACKED_TUPLE=1"
    "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1"
    "-DCUTLASS_VERSIONS_GENERATED"
    "-DCUTLASS_TEST_LEVEL=0"
    "-DCUTLASS_TEST_ENABLE_CACHED_RESULTS=1"
    "-DCUTLASS_DEBUG_TRACE_LEVEL=0"
    "--expt-relaxed-constexpr"
    "--expt-extended-lambda"

    # The "--threads" flag breaks CMAKE_BUILD_PARALLEL_LEVEL and can trigger
    # OOM on low-memory hosts, so the thread count is exposed as the
    # SGL_KERNEL_COMPILE_THREADS cache option (default 32) and appended
    # further down.
    # "--threads=32"

    # Suppress warnings
    "-Xcompiler=-Wno-clang-format-violations"
    "-Xcompiler=-Wno-conversion"
    "-Xcompiler=-Wno-deprecated-declarations"
    "-Xcompiler=-Wno-terminate"
    "-Xcompiler=-Wfatal-errors"
    "-Xcompiler=-ftemplate-backtrace-limit=1"
    "-Xcudafe=--diag_suppress=177"   # variable was declared but never referenced
    "-Xcudafe=--diag_suppress=2361"  # invalid narrowing conversion from "char" to "signed char"

    # uncomment to debug
    # "--ptxas-options=-v"
    # "--ptxas-options=--verbose,--register-usage-level=10,--warn-on-local-memory-usage"
)

set(SGL_KERNEL_COMPILE_THREADS 32 CACHE STRING "Set compilation threads, default 32")

# Validate SGL_KERNEL_COMPILE_THREADS: it must be an integer, and values
# below 1 are clamped to 1.
if (NOT SGL_KERNEL_COMPILE_THREADS MATCHES "^[0-9]+$")
    message(FATAL_ERROR "SGL_KERNEL_COMPILE_THREADS must be an integer, but was set to '${SGL_KERNEL_COMPILE_THREADS}'.")
elseif (SGL_KERNEL_COMPILE_THREADS LESS 1)
    message(STATUS "SGL_KERNEL_COMPILE_THREADS was set to a value less than 1. Using 1 instead.")
    set(SGL_KERNEL_COMPILE_THREADS 1)
endif()

list(APPEND SGL_KERNEL_CUDA_FLAGS
    "--threads=${SGL_KERNEL_COMPILE_THREADS}"
)
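
# Lower the value on low-memory hosts (illustrative):
#   cmake -DSGL_KERNEL_COMPILE_THREADS=8 ...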

option(SGL_KERNEL_ENABLE_BF16             "Enable BF16"             ON)
option(SGL_KERNEL_ENABLE_FP8              "Enable FP8"              ON)
option(SGL_KERNEL_ENABLE_FP4              "Enable FP4"              OFF)
option(SGL_KERNEL_ENABLE_FA3              "Enable FA3"              OFF)
option(SGL_KERNEL_ENABLE_SM90A            "Enable SM90A"            OFF)
option(SGL_KERNEL_ENABLE_SM100A           "Enable SM100A"           OFF)
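
# Example: force FP4 kernels and SM100A gencode on an older toolkit
# (illustrative; both also switch on automatically with CUDA >= 12.8):
#   cmake -DSGL_KERNEL_ENABLE_FP4=ON -DSGL_KERNEL_ENABLE_SM100A=ON ...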

if (SGL_KERNEL_ENABLE_BF16)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-DFLASHINFER_ENABLE_BF16"
    )
endif()

if (SGL_KERNEL_ENABLE_FP8)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-DFLASHINFER_ENABLE_FP8"
        "-DFLASHINFER_ENABLE_FP8_E4M3"
        "-DFLASHINFER_ENABLE_FP8_E5M2"
    )
endif()

if (ENABLE_BELOW_SM90)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-gencode=arch=compute_80,code=sm_80"
        "-gencode=arch=compute_89,code=sm_89"
    )
    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
        list(APPEND SGL_KERNEL_CUDA_FLAGS
            "-gencode=arch=compute_87,code=sm_87"
        )
    endif()

endif()

if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8" OR SGL_KERNEL_ENABLE_SM100A)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-gencode=arch=compute_100a,code=sm_100a"
        "-gencode=arch=compute_120a,code=sm_120a"
    )
    # For the sm_121, sm_110, and sm_101 targets, see https://github.com/pytorch/pytorch/pull/156176
    if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "13.0")
        list(APPEND SGL_KERNEL_CUDA_FLAGS
            "-gencode=arch=compute_103a,code=sm_103a"
            "--compress-mode=size"
        )
        if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
            list(APPEND SGL_KERNEL_CUDA_FLAGS
                "-gencode=arch=compute_110a,code=sm_110a"
                "-gencode=arch=compute_121a,code=sm_121a"
            )
        endif()
    else()
        if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
            list(APPEND SGL_KERNEL_CUDA_FLAGS
                "-gencode=arch=compute_101a,code=sm_101a"
            )
        endif()
    endif()
endif()

if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.4")
    set(SGL_KERNEL_ENABLE_FA3 ON)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-gencode=arch=compute_90a,code=sm_90a"
    )
endif()

if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8" OR SGL_KERNEL_ENABLE_FP4)
    list(APPEND SGL_KERNEL_CUDA_FLAGS
        "-DENABLE_NVFP4=1"
    )
endif()

# All source files
# NOTE: Please sort the filenames alphabetically
set(SOURCES
    "csrc/allreduce/custom_all_reduce.cu"
    "csrc/allreduce/mscclpp_allreduce.cu"
    "csrc/attention/cascade.cu"
    "csrc/attention/cutlass_mla_kernel.cu"
    "csrc/attention/lightning_attention_decode_kernel.cu"
    "csrc/attention/merge_attn_states.cu"
    "csrc/attention/vertical_slash_index.cu"
    "csrc/common_extension.cc"
    "csrc/elementwise/activation.cu"
    "csrc/elementwise/cast.cu"
    "csrc/elementwise/concat_mla.cu"
    "csrc/elementwise/copy.cu"
    "csrc/elementwise/fused_add_rms_norm_kernel.cu"
    "csrc/elementwise/rope.cu"
    "csrc/elementwise/topk.cu"
    "csrc/expert_specialization/es_fp8_blockwise.cu"

    "csrc/gemm/awq_kernel.cu"
    "csrc/gemm/bmm_fp8.cu"
    "csrc/gemm/dsv3_fused_a_gemm.cu"
    "csrc/gemm/dsv3_router_gemm_bf16_out.cu"
    "csrc/gemm/dsv3_router_gemm_entry.cu"
    "csrc/gemm/dsv3_router_gemm_float_out.cu"
    "csrc/gemm/fp8_blockwise_gemm_kernel.cu"
    "csrc/gemm/fp8_gemm_kernel.cu"
    "csrc/gemm/int8_gemm_kernel.cu"
    "csrc/gemm/nvfp4_expert_quant.cu"
    "csrc/gemm/nvfp4_quant_entry.cu"
    "csrc/gemm/nvfp4_quant_kernels.cu"
    "csrc/gemm/nvfp4_scaled_mm_entry.cu"
    "csrc/gemm/nvfp4_scaled_mm_kernels.cu"
    "csrc/gemm/per_tensor_quant_fp8.cu"
    "csrc/gemm/per_token_group_quant_8bit.cu"
    "csrc/gemm/per_token_group_quant_8bit_v2.cu"
    "csrc/gemm/per_token_quant_fp8.cu"
    "csrc/gemm/qserve_w4a8_per_chn_gemm.cu"
    "csrc/gemm/qserve_w4a8_per_group_gemm.cu"
    "csrc/gemm/marlin/gptq_marlin.cu"
    "csrc/gemm/marlin/gptq_marlin_repack.cu"
    "csrc/gemm/marlin/awq_marlin_repack.cu"
    "csrc/gemm/gptq/gptq_kernel.cu"
    "csrc/grammar/apply_token_bitmask_inplace_cuda.cu"

    "csrc/kvcacheio/transfer.cu"
    "csrc/mamba/causal_conv1d.cu"
    "csrc/memory/store.cu"
    "csrc/memory/weak_ref_tensor.cpp"

    "csrc/moe/cutlass_moe/w4a8/scaled_mm_entry.cu"
    "csrc/moe/cutlass_moe/w4a8/w4a8_moe_data.cu"
    "csrc/moe/cutlass_moe/w4a8/w4a8_grouped_mm_c3x.cu"
    "csrc/moe/marlin_moe_wna16/ops.cu"
    "csrc/moe/moe_align_kernel.cu"
    "csrc/moe/moe_fused_gate.cu"
    "csrc/moe/moe_sum.cu"
    "csrc/moe/moe_sum_reduce.cu"
    "csrc/moe/moe_topk_softmax_kernels.cu"
    "csrc/moe/nvfp4_blockwise_moe.cu"
    "csrc/moe/fp8_blockwise_moe_kernel.cu"
    "csrc/moe/prepare_moe_input.cu"

    "csrc/quantization/gguf/gguf_kernel.cu"
    "csrc/speculative/eagle_utils.cu"
    "csrc/speculative/ngram_utils.cu"
    "csrc/speculative/packbit.cu"
    "csrc/speculative/speculative_sampling.cu"

    "${repo-flashinfer_SOURCE_DIR}/csrc/norm.cu"
    "${repo-flashinfer_SOURCE_DIR}/csrc/renorm.cu"
    "${repo-flashinfer_SOURCE_DIR}/csrc/sampling.cu"

    "${repo-fast-hadamard-transform_SOURCE_DIR}/csrc/fast_hadamard_transform_cuda.cu"
    "${repo-fast-hadamard-transform_SOURCE_DIR}/csrc/fast_hadamard_transform.cpp"

    "${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/src/flash_fwd_sparse_hdim128_bf16_causal_sm80.cu"
    "${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/src/flash_fwd_sparse_hdim128_bf16_sm80.cu"
    "${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/src/flash_fwd_sparse_hdim128_fp16_causal_sm80.cu"
    "${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/src/flash_fwd_sparse_hdim128_fp16_sm80.cu"
    "${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/flash_sparse_api.cpp"
)

set(INCLUDES
    ${repo-cutlass_SOURCE_DIR}/include
    ${repo-cutlass_SOURCE_DIR}/tools/util/include
    ${repo-flashinfer_SOURCE_DIR}/include
    ${repo-flashinfer_SOURCE_DIR}/csrc
    ${repo-mscclpp_SOURCE_DIR}/include
    ${repo-cutlass_SOURCE_DIR}/examples/77_blackwell_fmha
    ${repo-cutlass_SOURCE_DIR}/examples/common
    ${repo-flash-attention_SOURCE_DIR}/csrc/flash_attn/src
)

# =========================== Common SM90 Build ============================= #
# Build SM90 library with fast math optimization (same namespace, different directory)
Python_add_library(common_ops_sm90_build MODULE USE_SABI ${SKBUILD_SABI_VERSION} WITH_SOABI ${SOURCES})

target_compile_options(common_ops_sm90_build PRIVATE
    $<$<COMPILE_LANGUAGE:CUDA>:${SGL_KERNEL_CUDA_FLAGS} -use_fast_math>
)
target_include_directories(common_ops_sm90_build PRIVATE ${INCLUDES})
# Set output name and separate build directory to avoid conflicts
set_target_properties(common_ops_sm90_build PROPERTIES
    OUTPUT_NAME "common_ops"
    LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/sm90"
)
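
# Both targets share OUTPUT_NAME "common_ops"; they are installed below into
# sgl_kernel/sm90 and sgl_kernel/sm100 so the Python package can pick the
# variant matching the detected GPU at import time (presumed loader behavior).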

# =========================== Common SM100+ Build ============================= #
# Build SM100+ library with precise math (same namespace, different directory)
Python_add_library(common_ops_sm100_build MODULE USE_SABI ${SKBUILD_SABI_VERSION} WITH_SOABI ${SOURCES})

target_compile_options(common_ops_sm100_build PRIVATE
    $<$<COMPILE_LANGUAGE:CUDA>:${SGL_KERNEL_CUDA_FLAGS}>
)
target_include_directories(common_ops_sm100_build PRIVATE ${INCLUDES})
# Set output name and separate build directory to avoid conflicts
set_target_properties(common_ops_sm100_build PROPERTIES
    OUTPUT_NAME "common_ops"
    LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/sm100"
)

find_package(Python3 COMPONENTS Interpreter REQUIRED)
execute_process(
    COMMAND ${Python3_EXECUTABLE} -c "import torch; print(int(torch._C._GLIBCXX_USE_CXX11_ABI))"
    OUTPUT_VARIABLE TORCH_CXX11_ABI
    OUTPUT_STRIP_TRAILING_WHITESPACE
)
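# Keep the extension's C++ ABI in sync with libtorch: mismatched
# _GLIBCXX_USE_CXX11_ABI values lead to unresolved symbols at import time.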
if(TORCH_CXX11_ABI STREQUAL "0")
    message(STATUS "Using old C++ ABI (-D_GLIBCXX_USE_CXX11_ABI=0)")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
else()
    message(STATUS "Using new C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI=1)")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=1")
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=1")
endif()

# mscclpp option
set(MSCCLPP_USE_CUDA ON)
set(MSCCLPP_BYPASS_GPU_CHECK ON)
set(MSCCLPP_BUILD_TESTS OFF)
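# MSCCLPP_BYPASS_GPU_CHECK presumably skips mscclpp's GPU-detection step so
# configuration can run on GPU-less build hosts; tests are disabled to keep
# the build lean.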
add_subdirectory(
    ${repo-mscclpp_SOURCE_DIR}
    ${CMAKE_CURRENT_BINARY_DIR}/mscclpp-build
)

target_link_libraries(common_ops_sm90_build PRIVATE ${TORCH_LIBRARIES} c10 cuda cublas cublasLt mscclpp_static)
target_link_libraries(common_ops_sm100_build PRIVATE ${TORCH_LIBRARIES} c10 cuda cublas cublasLt mscclpp_static)

# sparse flash attention
target_compile_definitions(common_ops_sm90_build PRIVATE
    FLASHATTENTION_DISABLE_BACKWARD
    FLASHATTENTION_DISABLE_DROPOUT
    FLASHATTENTION_DISABLE_UNEVEN_K
)
target_compile_definitions(common_ops_sm100_build PRIVATE
    FLASHATTENTION_DISABLE_BACKWARD
    FLASHATTENTION_DISABLE_DROPOUT
    FLASHATTENTION_DISABLE_UNEVEN_K
)

# Install to different subdirectories
# CMake will find the built libraries in their respective LIBRARY_OUTPUT_DIRECTORY locations
# and install them to the specified destinations
install(TARGETS common_ops_sm90_build LIBRARY DESTINATION sgl_kernel/sm90)
install(TARGETS common_ops_sm100_build LIBRARY DESTINATION sgl_kernel/sm100)

# ============================ Optional Install: FA3 ============================= #
# Set the flash-attention source files.
# FA3 currently supports sm80/sm86/sm90.
if (SGL_KERNEL_ENABLE_FA3)
    set(SGL_FLASH_KERNEL_CUDA_FLAGS
        "-DNDEBUG"
        "-DOPERATOR_NAMESPACE=sgl-kernel"
        "-O3"
        "-Xcompiler"
        "-fPIC"
        "-gencode=arch=compute_90a,code=sm_90a"
        "-std=c++17"
        "-DCUTE_USE_PACKED_TUPLE=1"
        "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1"
        "-DCUTLASS_VERSIONS_GENERATED"
        "-DCUTLASS_TEST_LEVEL=0"
        "-DCUTLASS_TEST_ENABLE_CACHED_RESULTS=1"
        "-DCUTLASS_DEBUG_TRACE_LEVEL=0"
        "--expt-relaxed-constexpr"
        "--expt-extended-lambda"
        "--use_fast_math"
        "-Xcompiler=-Wconversion"
        "-Xcompiler=-fno-strict-aliasing"
    )

    if (ENABLE_BELOW_SM90)
        list(APPEND SGL_FLASH_KERNEL_CUDA_FLAGS
            "-gencode=arch=compute_80,code=sm_80"
            "-gencode=arch=compute_86,code=sm_86"
        )
        # SM8X Logic
        file(GLOB FA3_SM8X_GEN_SRCS
            "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdim*_sm80.cu")
    endif()

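    # BF16 source files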
    file(GLOB FA3_BF16_GEN_SRCS
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimall_bf16*_sm90.cu")
    file(GLOB FA3_BF16_GEN_SRCS_
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimdiff_bf16*_sm90.cu")
    list(APPEND FA3_BF16_GEN_SRCS ${FA3_BF16_GEN_SRCS_})

    # FP16 source files
    file(GLOB FA3_FP16_GEN_SRCS
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimall_fp16*_sm90.cu")
    file(GLOB FA3_FP16_GEN_SRCS_
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimdiff_fp16*_sm90.cu")
    list(APPEND FA3_FP16_GEN_SRCS ${FA3_FP16_GEN_SRCS_})

    # FP8 source files
    file(GLOB FA3_FP8_GEN_SRCS
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimall_e4m3*_sm90.cu")
    file(GLOB FA3_FP8_GEN_SRCS_
        "${repo-flash-attention_SOURCE_DIR}/hopper/instantiations/flash_fwd_hdimdiff_e4m3*_sm90.cu")
    list(APPEND FA3_FP8_GEN_SRCS ${FA3_FP8_GEN_SRCS_})

    set(FA3_GEN_SRCS ${FA3_BF16_GEN_SRCS} ${FA3_FP16_GEN_SRCS} ${FA3_FP8_GEN_SRCS} ${FA3_SM8X_GEN_SRCS})

    set(FLASH_SOURCES
        "csrc/flash_extension.cc"
        "${repo-flash-attention_SOURCE_DIR}/hopper/flash_prepare_scheduler.cu"
        "${repo-flash-attention_SOURCE_DIR}/hopper/flash_api.cpp"
        "${repo-flash-attention_SOURCE_DIR}/hopper/flash_fwd_combine.cu"
        "${FA3_GEN_SRCS}"
    )

    Python_add_library(flash_ops MODULE USE_SABI ${SKBUILD_SABI_VERSION} WITH_SOABI ${FLASH_SOURCES})

    target_compile_options(flash_ops PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:${SGL_FLASH_KERNEL_CUDA_FLAGS}>)
    target_include_directories(flash_ops PRIVATE
        ${repo-cutlass_SOURCE_DIR}/include
        ${repo-cutlass_SOURCE_DIR}/tools/util/include
        ${repo-flash-attention_SOURCE_DIR}/hopper
    )
    target_link_libraries(flash_ops PRIVATE ${TORCH_LIBRARIES} c10 cuda)

    install(TARGETS flash_ops LIBRARY DESTINATION "sgl_kernel")
    set(FLASH_OPS_COMPILE_DEFS
        FLASHATTENTION_DISABLE_BACKWARD
        FLASHATTENTION_DISABLE_DROPOUT
        FLASHATTENTION_DISABLE_UNEVEN_K
        FLASHATTENTION_VARLEN_ONLY
    )

    if(NOT ENABLE_BELOW_SM90)
        list(APPEND FLASH_OPS_COMPILE_DEFS FLASHATTENTION_DISABLE_SM8x)
    endif()
    target_compile_definitions(flash_ops PRIVATE ${FLASH_OPS_COMPILE_DEFS})
endif()

# Build spatial_ops as a separate, optional extension for green contexts
set(SPATIAL_SOURCES
    "csrc/spatial/greenctx_stream.cu"
    "csrc/spatial_extension.cc"
)

Python_add_library(spatial_ops MODULE USE_SABI ${SKBUILD_SABI_VERSION} WITH_SOABI ${SPATIAL_SOURCES})
target_compile_options(spatial_ops PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:${SGL_KERNEL_CUDA_FLAGS}>)
target_link_libraries(spatial_ops PRIVATE ${TORCH_LIBRARIES} c10 cuda)
install(TARGETS spatial_ops LIBRARY DESTINATION sgl_kernel)

# ============================ Extra Install: FlashMLA ============================= #
include(${CMAKE_CURRENT_LIST_DIR}/cmake/flashmla.cmake)

# ============================ Extra Install: DeepGEMM (JIT) ============================= #
# Create a separate library for DeepGEMM's Python API.
# This keeps its compilation isolated from the main common_ops.
set(DEEPGEMM_SOURCES
    "${repo-deepgemm_SOURCE_DIR}/csrc/python_api.cpp"
)

Python_add_library(deep_gemm_cpp MODULE USE_SABI ${SKBUILD_SABI_VERSION} WITH_SOABI ${DEEPGEMM_SOURCES})

# Link against necessary libraries, including nvrtc for JIT compilation.
target_link_libraries(deep_gemm_cpp PRIVATE ${TORCH_LIBRARIES} c10 cuda nvrtc mscclpp_static)

# Add include directories needed by DeepGEMM.
target_include_directories(deep_gemm_cpp PRIVATE
    ${repo-deepgemm_SOURCE_DIR}/deep_gemm/include
    ${repo-cutlass_SOURCE_DIR}/include
    ${repo-fmt_SOURCE_DIR}/include
)

# Apply the same compile options as common_ops.
target_compile_options(deep_gemm_cpp PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:${SGL_KERNEL_CUDA_FLAGS}>)

# Create an empty __init__.py to make `deepgemm` a Python package.
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/deepgemm_pkg_init.py "")
install(
    FILES ${CMAKE_CURRENT_BINARY_DIR}/deepgemm_pkg_init.py
    DESTINATION deep_gemm
    RENAME __init__.py
)

# Install the compiled DeepGEMM API library.
install(TARGETS deep_gemm_cpp LIBRARY DESTINATION deep_gemm)

# Install the source files required by DeepGEMM for runtime JIT compilation.
install(
    DIRECTORY ${repo-deepgemm_SOURCE_DIR}/deep_gemm/
    DESTINATION deep_gemm
)
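
# DeepGEMM's runtime JIT also needs the CUTE/CUTLASS headers at run time; the
# installs below ship them inside the installed deep_gemm package.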

install(DIRECTORY "${repo-cutlass_SOURCE_DIR}/include/cute/"
        DESTINATION "deep_gemm/include/cute")

install(DIRECTORY "${repo-cutlass_SOURCE_DIR}/include/cutlass/"
        DESTINATION "deep_gemm/include/cutlass")

# ============================ Extra Install: triton kernels ============================= #
install(DIRECTORY "${repo-triton_SOURCE_DIR}/python/triton_kernels/triton_kernels/"
        DESTINATION "triton_kernels"
        PATTERN ".git*" EXCLUDE
        PATTERN "__pycache__" EXCLUDE)

# ============================ Extra Install: FA4 ============================= #
# TODO: find a better install condition.
if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8" OR SGL_KERNEL_ENABLE_SM100A)
    # flash_attn/cute
    install(DIRECTORY "${repo-flash-attention-origin_SOURCE_DIR}/flash_attn/cute/"
            DESTINATION "flash_attn/cute"
            PATTERN ".git*" EXCLUDE
            PATTERN "__pycache__" EXCLUDE)
endif()