# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

cmake_minimum_required(VERSION 3.21)

option(USE_ROCM "Use ROCm" OFF)
option(USE_HIPBLASLT "Use HIPBLASLT" ON)
# The AOTriton/CK backends are temporarily unsupported; rocBLAS can be enabled instead
option(USE_ROCBLAS "Use ROCBLAS" OFF)

if(NOT USE_ROCM)
  if(((EXISTS "/opt/dtk/") OR (EXISTS $ENV{ROCM_PATH})) AND NOT (EXISTS "/bin/nvcc"))
    message("hcu detected.")
    set(USE_ROCM ON)
  endif()
endif()
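
# The auto-detection above can be bypassed by setting USE_ROCM explicitly at
# configure time, e.g. (illustrative invocation):
#   cmake -DUSE_ROCM=ON ..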

if (USE_ROCM)
  add_compile_definitions(__HIP_CLANG_ONLY__=1)
  if (NOT USE_HIPBLASLT AND NOT USE_ROCBLAS)
    message(FATAL_ERROR "Need specify at least one GEMM library to use: HIPBLASLT or ROCBLAS")
  endif()
  unset(USE_CUDA)
else()
  set(USE_CUDA TRUE)
endif()
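
# Illustrative GEMM backend selection for a ROCm build (assumes rocBLAS is
# discoverable via find_package):
#   cmake -DUSE_ROCM=ON -DUSE_HIPBLASLT=OFF -DUSE_ROCBLAS=ON ..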

# Language options
if(USE_CUDA)
  if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
    if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL 13.0)
      set(CMAKE_CUDA_ARCHITECTURES 75 80 89 90 100 120)
    elseif (CUDAToolkit_VERSION VERSION_GREATER_EQUAL 12.8)
      set(CMAKE_CUDA_ARCHITECTURES 70 80 89 90 100 120)
    else ()
      set(CMAKE_CUDA_ARCHITECTURES 70 80 89 90)
    endif()
  endif()
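  # The defaults above apply only when CMAKE_CUDA_ARCHITECTURES is unset; a
  # custom list can be passed at configure time, e.g. (illustrative):
  #   cmake -DCMAKE_CUDA_ARCHITECTURES="80;90" ..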
  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CUDA_STANDARD 17)
  set(CMAKE_CUDA_STANDARD_REQUIRED ON)
  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g -G")
  endif()
  
  # Hide non-necessary symbols in shared object.
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/libtransformer_engine.version")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/libtransformer_engine.version")
  
  # Transformer Engine library
  project(transformer_engine LANGUAGES CUDA CXX)
  
  # CUDA Toolkit
  find_package(CUDAToolkit REQUIRED)
  if (CUDAToolkit_VERSION VERSION_LESS 12.0)
    message(FATAL_ERROR "CUDA 12.0+ is required, but found CUDA ${CUDAToolkit_VERSION}")
  endif()
  
  # cuDNN frontend API
  set(CUDNN_FRONTEND_INCLUDE_DIR
      "${CMAKE_CURRENT_SOURCE_DIR}/../../3rdparty/cudnn-frontend/include")
  if(NOT EXISTS "${CUDNN_FRONTEND_INCLUDE_DIR}")
      message(FATAL_ERROR
              "Could not find cuDNN frontend API at ${CUDNN_FRONTEND_INCLUDE_DIR}. "
              "Try running 'git submodule update --init --recursive' "
              "within the Transformer Engine source.")
  endif()
  include(${CMAKE_CURRENT_SOURCE_DIR}/../../3rdparty/cudnn-frontend/cmake/cuDNN.cmake)
else()
  set(CMAKE_CXX_STANDARD 17)
  project(transformer_engine LANGUAGES HIP CXX)
  
  # Disable asserts (they cannot be used on the HIP stack)
  add_definitions(-DNDEBUG)
  add_definitions(-DUSE_ROCM)
  add_definitions(-DHIP_ENABLE_WARP_SYNC_BUILTINS)
  # Use hipcc instead of the default clang++
  set(CMAKE_CXX_COMPILER "${ROCM_PATH}/bin/hipcc")

  if(NOT DEFINED ENV{NVTE_ROCM_ARCH})
    set(CMAKE_HIP_ARCHITECTURES gfx906;gfx926;gfx928;gfx936)
  else()
    set(CMAKE_HIP_ARCHITECTURES $ENV{NVTE_ROCM_ARCH})
  endif()
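  # Illustrative override: constrain the build to specific archs via the
  # NVTE_ROCM_ARCH environment variable, e.g.
  #   NVTE_ROCM_ARCH=gfx928 cmake ..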
  
  # Note: with parallel compile jobs enabled, a build error would be
  # duplicated parallel-jobs times, so this stays disabled by default.
  # set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -parallel-jobs=4")
  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -g")
  endif()
  
  list(APPEND CMAKE_MODULE_PATH "/opt/dtk")
endif()

set(message_line "-------------------------------------------------------------")
message("${message_line}")
message(STATUS "USE_ROCM ${USE_ROCM}")
if(USE_ROCM)
  message(STATUS "CMAKE_HIP_ARCHITECTURES: ${CMAKE_HIP_ARCHITECTURES}")
  message(STATUS "USE_HIPBLASLT ${USE_HIPBLASLT} USE_ROCBLAS ${USE_ROCBLAS}")
endif()

set(CUTLASS_INCLUDE_DIR
  "${CMAKE_CURRENT_SOURCE_DIR}/../../3rdparty/cutlass/include")
set(CUTLASS_TOOLS_INCLUDE_DIR
  "${CMAKE_CURRENT_SOURCE_DIR}/../../3rdparty/cutlass/tools/util/include")

# Python
find_package(Python COMPONENTS Interpreter Development.Module REQUIRED)

# NVIDIA MathDX include directory (from Python package install location)
if(NOT DEFINED MATHDX_INCLUDE_DIR)
  execute_process(
    COMMAND ${Python_EXECUTABLE} -m pip show nvidia-mathdx
    OUTPUT_VARIABLE _PIP_SHOW_MATHDX
    ERROR_VARIABLE _PIP_SHOW_MATHDX_ERR
    RESULT_VARIABLE _PIP_SHOW_MATHDX_RES
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(NOT _PIP_SHOW_MATHDX_RES EQUAL 0)
    message(FATAL_ERROR "Failed to query 'nvidia-mathdx' with pip (using ${Python_EXECUTABLE}): ${_PIP_SHOW_MATHDX_ERR}")
  endif()
  string(REGEX MATCH "Location: ([^\n\r]+)" _MATHDX_LOC_MATCH "${_PIP_SHOW_MATHDX}")
  if(NOT _MATHDX_LOC_MATCH)
    message(FATAL_ERROR "Could not parse installation location for 'nvidia-mathdx'. Output was:\n${_PIP_SHOW_MATHDX}")
  endif()
  set(MATHDX_LOCATION "${CMAKE_MATCH_1}")
  set(MATHDX_INCLUDE_DIR "${MATHDX_LOCATION}/nvidia/mathdx/include")
endif()
if(NOT EXISTS "${MATHDX_INCLUDE_DIR}")
  message(FATAL_ERROR "MATHDX include directory not found at ${MATHDX_INCLUDE_DIR}. Set MATHDX_INCLUDE_DIR or ensure 'nvidia-mathdx' is installed for ${Python_EXECUTABLE}.")
endif()
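
# If the pip lookup above is unsuitable, MATHDX_INCLUDE_DIR can be set directly
# (path is illustrative):
#   cmake -DMATHDX_INCLUDE_DIR=/opt/nvidia/mathdx/include ..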

# Configure Transformer Engine library
include_directories(${PROJECT_SOURCE_DIR}/..)
set(transformer_engine_SOURCES)

if(USE_CUDA)
  list(APPEND transformer_engine_SOURCES
       cudnn_utils.cpp
       transformer_engine.cpp
       common.cu
       multi_tensor/adam.cu
       multi_tensor/compute_scale.cu
       multi_tensor/l2norm.cu
       multi_tensor/scale.cu
       multi_tensor/sgd.cu
       transpose/cast_transpose.cu
       transpose/transpose.cu
       transpose/cast_transpose_fusion.cu
       transpose/transpose_fusion.cu
       transpose/multi_cast_transpose.cu
       transpose/quantize_transpose_square_blockwise.cu
       transpose/quantize_transpose_vector_blockwise.cu
       transpose/swap_first_dims.cu
       transpose/quantize_transpose_vector_blockwise_fp4.cu
       activation/gelu.cu
       dropout/dropout.cu
       fused_attn/flash_attn.cu
       fused_attn/context_parallel.cu
       fused_attn/kv_cache.cu
       fused_attn/fused_attn_f16_max512_seqlen.cu
       fused_attn/fused_attn_f16_arbitrary_seqlen.cu
       activation/relu.cu
       activation/swiglu.cu
       fused_attn/fused_attn_fp8.cu
       fused_attn/fused_attn.cpp
       fused_attn/utils.cu
       gemm/config.cpp
       gemm/cublaslt_gemm.cu
       gemm/cutlass_grouped_gemm.cu
       normalization/common.cpp
       normalization/layernorm/ln_api.cpp
       normalization/layernorm/ln_bwd_semi_cuda_kernel.cu
       normalization/layernorm/ln_fwd_cuda_kernel.cu
       normalization/rmsnorm/rmsnorm_api.cpp
       normalization/rmsnorm/rmsnorm_bwd_semi_cuda_kernel.cu
       normalization/rmsnorm/rmsnorm_fwd_cuda_kernel.cu
       permutation/permutation.cu
       util/cast.cu
       util/padding.cu
       util/cuda_driver.cpp
       util/cuda_nvml.cpp
       util/cuda_runtime.cpp
       util/multi_stream.cpp
       util/rtc.cpp
       swizzle/swizzle.cu
       swizzle/swizzle_block_scaling.cu
       fused_softmax/scaled_masked_softmax.cu
       fused_softmax/scaled_upper_triang_masked_softmax.cu
       fused_softmax/scaled_aligned_causal_masked_softmax.cu
       fused_rope/fused_rope.cu
       fused_router/fused_moe_aux_loss.cu
       fused_router/fused_score_for_moe_aux_loss.cu
       fused_router/fused_topk_with_score_function.cu
       recipe/current_scaling.cu
       recipe/delayed_scaling.cu
       recipe/fp8_block_scaling.cu
       recipe/nvfp4.cu
       hadamard_transform/hadamard_transform.cu
       hadamard_transform/hadamard_transform_cast_fusion.cu
       comm_gemm_overlap/userbuffers/ipcsocket.cc
       comm_gemm_overlap/userbuffers/userbuffers-host.cpp
       comm_gemm_overlap/userbuffers/userbuffers.cu
       comm_gemm_overlap/comm_gemm_overlap.cpp)
  if (NVTE_WITH_CUBLASMP)
    list(APPEND transformer_engine_SOURCES
      comm_gemm/comm_gemm.cpp)
  endif()
  add_library(transformer_engine SHARED ${transformer_engine_SOURCES})
else()
  list(APPEND transformer_engine_SOURCES
       cudnn_utils.cpp
       transformer_engine.cpp
       common.cu
       fused_attn/flash_attn.cu
       fused_attn/context_parallel.cu
       fused_attn/kv_cache.cu
       multi_tensor/adam.cu
       multi_tensor/compute_scale.cu
       multi_tensor/l2norm.cu
       multi_tensor/scale.cu
       multi_tensor/sgd.cu
       transpose/cast_transpose.cu
       transpose/transpose.cu
       transpose/cast_transpose_fusion.cu
       transpose/transpose_fusion.cu
       transpose/multi_cast_transpose.cu
       transpose/quantize_transpose_square_blockwise.cu
       transpose/quantize_transpose_vector_blockwise.cu
       transpose/swap_first_dims.cu
       activation/gelu.cu
       dropout/dropout.cu
       activation/relu.cu
       activation/swiglu.cu
       gemm/config.cpp
       gemm/cublaslt_gemm.cu
       gemm/hipblas_gemm.cu
       normalization/common.cpp
       normalization/layernorm/ln_api.cpp
       normalization/layernorm/ln_bwd_semi_cuda_kernel.cu
       normalization/layernorm/ln_fwd_cuda_kernel.cu
       normalization/rmsnorm/rmsnorm_api.cpp
       normalization/rmsnorm/rmsnorm_bwd_semi_cuda_kernel.cu
       normalization/rmsnorm/rmsnorm_fwd_cuda_kernel.cu
       permutation/permutation.cu
       util/cast.cu
       util/padding.cu
       util/cuda_driver.cpp
       util/cuda_nvml.cpp
       util/cuda_runtime.cpp
       util/multi_stream.cpp
       util/rtc.cpp
       swizzle/swizzle.cu
       swizzle/swizzle_block_scaling.cu
       fused_softmax/scaled_masked_softmax.cu
       fused_softmax/scaled_upper_triang_masked_softmax.cu
       fused_softmax/scaled_aligned_causal_masked_softmax.cu
       fused_rope/fused_rope.cu
       fused_router/fused_moe_aux_loss.cu
       fused_router/fused_score_for_moe_aux_loss.cu
       fused_router/fused_topk_with_score_function.cu
       recipe/current_scaling.cu
       recipe/delayed_scaling.cu
       recipe/fp8_block_scaling.cu
       comm_gemm_overlap/userbuffers/ipcsocket.cc
       comm_gemm_overlap/userbuffers/userbuffers-host.cpp
       comm_gemm_overlap/userbuffers/userbuffers.cu
       comm_gemm_overlap/comm_gemm_overlap.cpp)
  if (NVTE_WITH_CUBLASMP)
    list(APPEND transformer_engine_SOURCES
      comm_gemm/comm_gemm.cpp)
  endif()
  # process source code files
  message("${message_line}")
  message(STATUS "CMAKE_CURRENT_SOURCE_DIR: ${CMAKE_CURRENT_SOURCE_DIR}")
  message(STATUS "PROJECT_SOURCE_DIR: ${PROJECT_SOURCE_DIR}")

  set(TE ${CMAKE_CURRENT_SOURCE_DIR}/../..)
  set(THIRDPARTY ${TE}/3rdparty)
  list(APPEND CMAKE_MODULE_PATH "${THIRDPARTY}/hipify_torch/cmake")
  include(Hipify)
  message(STATUS "CMAKE_MODULE_PATH: ${CMAKE_MODULE_PATH}")

  set(header_include_dir
      ${CMAKE_CURRENT_SOURCE_DIR}/comm_gemm_overlap/userbuffers
      ${CMAKE_CURRENT_SOURCE_DIR}/activation 
      ${CMAKE_CURRENT_SOURCE_DIR}/fused_attn
      ${CMAKE_CURRENT_SOURCE_DIR}/include 
      ${CMAKE_CURRENT_SOURCE_DIR}/transpose
      ${CMAKE_CURRENT_SOURCE_DIR}/util
      ${CMAKE_CURRENT_SOURCE_DIR}/normalization
      ${CMAKE_CURRENT_SOURCE_DIR}/normalization/rmsnorm
      ${CMAKE_CURRENT_SOURCE_DIR}/normalization/layernorm 
      ${CMAKE_CURRENT_SOURCE_DIR})
  message(STATUS "HIPIFY CUDA_SOURCE_DIR: ${CMAKE_CURRENT_SOURCE_DIR}")
  message(STATUS "HIPIFY HEADER_INCLUDE_DIR: ${header_include_dir}")
  hipify(CUDA_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}
      HEADER_INCLUDE_DIR ${header_include_dir}
      IGNORES "*/amd_detail/*"
      CUSTOM_MAP_FILE "${TE}/hipify_custom_map.json"
  )
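  # get_hipified_list maps each entry of transformer_engine_SOURCES to its
  # hipified counterpart generated by hipify() above (e.g. a .cu source is
  # typically rewritten as a .hip file; exact naming is up to hipify_torch).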
  get_hipified_list("${transformer_engine_SOURCES}" te_hip_sources)
  message("${message_line}")
  message(STATUS "nvte hipified sources: ${te_hip_sources}")

  add_library(transformer_engine SHARED ${te_hip_sources})
endif()

target_include_directories(transformer_engine PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
# Configure dependencies
if (USE_CUDA)
  target_link_libraries(transformer_engine PUBLIC
                        CUDA::cublas
                        CUDA::cudart
                        CUDNN::cudnn_all)
  target_include_directories(transformer_engine PRIVATE
                          ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
  target_include_directories(transformer_engine SYSTEM PRIVATE
                          ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}/cccl)
  target_include_directories(transformer_engine PRIVATE "${CUDNN_FRONTEND_INCLUDE_DIR}")
  target_include_directories(transformer_engine PRIVATE
                          ${CUTLASS_INCLUDE_DIR}
                          ${CUTLASS_TOOLS_INCLUDE_DIR})
else()
  target_include_directories(transformer_engine PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
  # AOTriton is currently unsupported
  set(AotritonAndCk_fused_attn "unsupported")

  find_package(hip)
  list(APPEND transformer_engine_LINKER_LIBS hip::host hip::device roctx64)
  find_package(rccl)
  list(APPEND transformer_engine_LINKER_LIBS rccl)
  if(USE_HIPBLASLT)
    find_package(hipblaslt)
    find_package(hipblas REQUIRED PATHS ${ROCM_PATH})
    target_compile_definitions(transformer_engine PUBLIC USE_HIPBLASLT)
    list(APPEND transformer_engine_LINKER_LIBS roc::hipblaslt hipblas)
  endif()
  if(USE_ROCBLAS)
    find_package(rocblas)
    target_compile_definitions(transformer_engine PUBLIC USE_ROCBLAS)
    list(APPEND transformer_engine_LINKER_LIBS roc::rocblas)
  endif()
  target_link_libraries(transformer_engine PUBLIC ${transformer_engine_LINKER_LIBS})
endif()

# Compiling Userbuffers with native MPI bootstrapping requires linking against MPI
option(NVTE_UB_WITH_MPI "Bootstrap Userbuffers with MPI" OFF)
if (NVTE_UB_WITH_MPI)
    find_package(MPI REQUIRED)
    target_link_libraries(transformer_engine PUBLIC MPI::MPI_CXX)
    target_include_directories(transformer_engine PRIVATE ${MPI_CXX_INCLUDES})
    target_compile_definitions(transformer_engine PUBLIC NVTE_UB_WITH_MPI)
endif()
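
# Illustrative MPI-enabled configure (assumes an MPI toolchain is available):
#   cmake -DNVTE_UB_WITH_MPI=ON ..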

option(NVTE_ENABLE_NVSHMEM "Compile with NVSHMEM library" OFF)
if (NVTE_ENABLE_NVSHMEM)
    add_subdirectory(nvshmem_api)
    target_link_libraries(transformer_engine PUBLIC nvshmemapi)
    target_include_directories(transformer_engine PUBLIC ${NVSHMEMAPI_INCLUDE_DIR})
endif()

option(NVTE_WITH_CUBLASMP "Use cuBLASMp for tensor parallel GEMMs" OFF)
if (NVTE_WITH_CUBLASMP)
    target_compile_definitions(transformer_engine PRIVATE NVTE_WITH_CUBLASMP)
    target_include_directories(transformer_engine PRIVATE ${CUBLASMP_DIR}/include ${NVSHMEM_DIR}/include)
    find_library(CUBLASMP_LIB
                 NAMES cublasmp libcublasmp
                 PATHS ${CUBLASMP_DIR}
                 PATH_SUFFIXES lib
                 REQUIRED)
    find_library(NVSHMEM_HOST_LIB
                 NAMES nvshmem_host libnvshmem_host.so.3
                 PATHS ${NVSHMEM_DIR}
                 PATH_SUFFIXES lib
                 REQUIRED)
    target_link_libraries(transformer_engine PUBLIC ${CUBLASMP_LIB} ${NVSHMEM_HOST_LIB})
    message(STATUS "Using cuBLASMp at: ${CUBLASMP_DIR}")
    message(STATUS "Using nvshmem at: ${NVSHMEM_DIR}")
endif()
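
# Illustrative cuBLASMp configure (paths are hypothetical and must point at
# local cuBLASMp and NVSHMEM installs):
#   cmake -DNVTE_WITH_CUBLASMP=ON -DCUBLASMP_DIR=/opt/cublasmp -DNVSHMEM_DIR=/opt/nvshmem ..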

if (USE_CUDA)
  # Hack to enable dynamic loading in cuDNN frontend
  target_compile_definitions(transformer_engine PUBLIC NV_CUDNN_FRONTEND_USE_DYNAMIC_LOADING)
endif()

# Helper functions to make header files with C++ strings
function(make_string_header STRING STRING_NAME)
    configure_file(util/string_header.h.in
                   "string_headers/${STRING_NAME}.h"
                   @ONLY)
endfunction()
function(make_string_header_from_file file_ STRING_NAME)
    file(READ "${file_}" STRING)
    configure_file(util/string_header.h.in
                   "string_headers/${STRING_NAME}.h"
                   @ONLY)
endfunction()
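
# For example, make_string_header("${cuda_include_path}" string_path_cuda_include)
# generates string_headers/string_path_cuda_include.h; with @ONLY substitution,
# util/string_header.h.in presumably expands @STRING@ and @STRING_NAME@ to embed
# the value as a C++ string constant.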

# Header files with C++ strings
if(USE_CUDA)
  list(GET CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES 0 cuda_include_path)
  make_string_header("${cuda_include_path}"
                     string_path_cuda_include)
  make_string_header_from_file(transpose/rtc/cast_transpose_fusion.cu
                               string_code_transpose_rtc_cast_transpose_fusion_cu)
  make_string_header_from_file(transpose/rtc/cast_transpose.cu
                               string_code_transpose_rtc_cast_transpose_cu)
  make_string_header_from_file(transpose/rtc/transpose.cu
                               string_code_transpose_rtc_transpose_cu)
  make_string_header_from_file(transpose/rtc/swap_first_dims.cu
                               string_code_transpose_rtc_swap_first_dims_cu)                             
  make_string_header_from_file(utils.cuh
                               string_code_utils_cuh)
else()
  make_string_header_from_file(utils_hip.cuh
                               string_code_utils_cuh)
  make_string_header_from_file(transpose/rtc/cast_transpose_fusion.hip
                              string_code_transpose_rtc_cast_transpose_fusion_cu)
  make_string_header_from_file(transpose/rtc/cast_transpose.hip
                              string_code_transpose_rtc_cast_transpose_cu)
  make_string_header_from_file(transpose/rtc/transpose.hip
                              string_code_transpose_rtc_transpose_cu)
  make_string_header_from_file(transpose/rtc/swap_first_dims.cu
                              string_code_transpose_rtc_swap_first_dims_cu)
endif()


make_string_header_from_file(util/math.h
                             string_code_util_math_h)
target_include_directories(transformer_engine PRIVATE
                           "${CMAKE_CURRENT_BINARY_DIR}/string_headers")

# Compiler options
set_source_files_properties(fused_softmax/scaled_masked_softmax.cu
                            fused_softmax/scaled_upper_triang_masked_softmax.cu
                            fused_softmax/scaled_aligned_causal_masked_softmax.cu
                            multi_tensor/adam.cu
                            multi_tensor/compute_scale.cu
                            multi_tensor/l2norm.cu
                            multi_tensor/scale.cu
                            multi_tensor/sgd.cu
                            fused_attn/flash_attn.cu
                            fused_attn/context_parallel.cu
                            fused_attn/kv_cache.cu
                            PROPERTIES
                            COMPILE_OPTIONS "--use_fast_math")
option(NVTE_BUILD_ACTIVATION_WITH_FAST_MATH "Compile activation kernels with --use_fast_math option" OFF)
if (NVTE_BUILD_ACTIVATION_WITH_FAST_MATH)
  set_source_files_properties(activation/gelu.cu
                              activation/relu.cu
                              activation/swiglu.cu
                              util/cast.cu
                              PROPERTIES
                              COMPILE_OPTIONS "--use_fast_math")
endif()
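
# Illustrative opt-in (trades numerical accuracy for speed in the kernels above):
#   cmake -DNVTE_BUILD_ACTIVATION_WITH_FAST_MATH=ON ..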

if(USE_CUDA)
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -O3")
else()
  option(NVTE_BUILD_SUPPRESS_UNUSED_WARNING "Suppress unused-* warnings during build" ON)
  option(NVTE_BUILD_SUPPRESS_RETURN_TYPE_WARNING "Suppress return-type warnings during build" OFF)
  option(NVTE_BUILD_SUPPRESS_SIGN_COMPARE_WARNING "Suppress sign-compare warnings during build" OFF)
  if(NVTE_BUILD_SUPPRESS_UNUSED_WARNING)
    set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wno-unused-result -Wno-unused-function -Wno-unused-private-field -Wno-unused-variable")
  endif()
  if(NVTE_BUILD_SUPPRESS_RETURN_TYPE_WARNING)
    set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wno-return-type")
  endif()
  if(NVTE_BUILD_SUPPRESS_SIGN_COMPARE_WARNING)
    set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wno-sign-compare")
  endif()
  set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -O3")
  set(HIP_HCC_FLAGS "${CMAKE_HIP_FLAGS} -mavx2 -mf16c -mfma -std=c++17")
  # Ask hcc to generate device code during compilation so we can use
  # host linker to link.
  set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -fno-gpu-rdc -w")
  foreach(rocm_arch ${CMAKE_HIP_ARCHITECTURES})
    # If CMAKE_CXX_FLAGS already contains --offload-arch entries, they should be removed first to avoid duplicates
    set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} --offload-arch=${rocm_arch}")
  endforeach()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${HIP_HCC_FLAGS}")
endif()

# Number of parallel build jobs
if(DEFINED ENV{MAX_JOBS})
  set(BUILD_JOBS_STR "$ENV{MAX_JOBS}")
elseif(DEFINED ENV{NVTE_BUILD_MAX_JOBS})
  set(BUILD_JOBS_STR "$ENV{NVTE_BUILD_MAX_JOBS}")
else()
  set(BUILD_JOBS_STR "max")
endif()
message(STATUS "Parallel build jobs: ${BUILD_JOBS_STR}")

# Number of threads per parallel build job
set(BUILD_THREADS_PER_JOB $ENV{NVTE_BUILD_THREADS_PER_JOB})
if (NOT BUILD_THREADS_PER_JOB)
  set(BUILD_THREADS_PER_JOB 1)
endif()
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --threads ${BUILD_THREADS_PER_JOB}")
message(STATUS "Threads per parallel build job: ${BUILD_THREADS_PER_JOB}")

# Install library
install(TARGETS transformer_engine DESTINATION .)