/*!
 * \file tl/op/builtin.h
 * \brief Builtin intrinsics.
 *
 */

#ifndef TVM_TL_OP_BUILTIN_H_
#define TVM_TL_OP_BUILTIN_H_

#include "operator.h"
#include <tvm/ir/transform.h>

namespace tvm {
namespace tl {

namespace attr {
static constexpr const char *kPaddingMap = "padding_map";
static constexpr const char *kWarpSpecializationScope =
    "kWarpSpecializationScope";
static constexpr const char *kCustomWarpSpecialization =
    "kCustomWarpSpecialization";
} // namespace attr

static constexpr const char *kDebugMergeSharedMemoryAllocations =
    "tl.debug_merge_shared_memory_allocations";
static constexpr const char *kDisableTMALower = "tl.disable_tma_lower";
static constexpr const char *kDisableSafeMemoryLegalize =
    "tl.disable_safe_memory_legalize";
static constexpr const char *kDisableWarpSpecialized =
    "tl.disable_warp_specialized";
static constexpr const char *kConfigIndexBitwidth = "tl.config_index_bitwidth";
static constexpr const char *kEnableAggressiveSharedMemoryMerge =
    "tl.enable_aggressive_shared_memory_merge";
static constexpr const char *kDisableFastMath = "tl.disable_fast_math";
static constexpr const char *kEnableFastMath = "tl.enable_fast_math";
static constexpr const char *kPtxasRegisterUsageLevel =
    "tl.ptxas_register_usage_level";
static constexpr const char *kEnablePTXASVerboseOutput =
    "tl.enable_ptxas_verbose_output";
static constexpr const char *kDisableShuffleElect = "tl.disable_shuffle_elect";
/*!
 * \brief Whether to disable dynamic tail split
 *
 * kDisableDynamicTailSplit = "tl.disable_dynamic_tail_split"
 *
 */
static constexpr const char *kDisableDynamicTailSplit =
    "tl.disable_dynamic_tail_split";

/*!
 * \brief Whether to disable thread storage synchronization
 *
 * When set, automatic insertion of thread synchronization barriers (e.g.,
 * __syncthreads()) for coordinating shared memory accesses is disabled. This
 * is useful when manual synchronization is preferred or synchronization is
 * not needed.
 *
 * kDisableThreadStorageSync = "tl.disable_thread_storage_sync"
 *
 */
static constexpr const char *kDisableThreadStorageSync =
    "tl.disable_thread_storage_sync";

/*!
 * \brief The size of the vectorized dimension in a buffer, specified by the
 * user
 *
 * For example, if the vectorized access is 128 bits wide and the dtype of
 * buffer A[m, k] is float16, the size of the vectorized dimension (i.e. k) of
 * buffer A must be divisible by 8 (8 = 128 / 16).
 *
 * kDynamicAlignment = "tl.dynamic_alignment"
 *
 */
static constexpr const char *kDynamicAlignment = "tl.dynamic_alignment";

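// Example (illustrative sketch, not part of this header): the "tl.*" keys
// above are ordinary PassContext options, so a lowering pass would read them
// through the standard API in <tvm/ir/transform.h>. The usage pattern below
// is an assumption for illustration:
//
//   using namespace tvm;
//   transform::PassContext ctx = transform::PassContext::Current();
//   bool disable_tma =
//       ctx->GetConfig<Bool>(tl::kDisableTMALower, Bool(false)).value();
//   if (disable_tma) { /* skip TMA lowering, emit plain copies instead */ }
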
/*!
 * \brief Get the type of the CUDA tensor map
 *
 * DataType cuTensorMapType()
 *
 */
DataType cuTensorMapType();

// Fast math related ops.
// __exp(x) - fast exponential
TVM_DLL const Op &__exp();
// __exp10(x) - fast base-10 exponential
TVM_DLL const Op &__exp10();
// __log(x) - fast natural logarithm
TVM_DLL const Op &__log();
// __log2(x) - fast base-2 logarithm
TVM_DLL const Op &__log2();
// __log10(x) - fast base-10 logarithm
TVM_DLL const Op &__log10();
// __tan(x) - fast tangent
TVM_DLL const Op &__tan();
// __cos(x) - fast cosine
TVM_DLL const Op &__cos();
// __sin(x) - fast sine
TVM_DLL const Op &__sin();

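// Example (illustrative sketch): the fast-math ops are plain TIR intrinsics,
// so a rewrite substituting exp(x) with its fast variant can rebuild the call
// directly; `x` is a placeholder PrimExpr:
//
//   PrimExpr fast_exp = tvm::tir::Call(x.dtype(), tl::__exp(), {x});
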
// High-precision, IEEE-compliant math ops with explicit rounding modes.
// ieee_add(x, y, rounding_mode) - IEEE-compliant addition
TVM_DLL const Op &ieee_add();
// ieee_sub(x, y, rounding_mode) - IEEE-compliant subtraction
TVM_DLL const Op &ieee_sub();
// ieee_mul(x, y, rounding_mode) - IEEE-compliant multiplication
TVM_DLL const Op &ieee_mul();
// ieee_fmaf(x, y, z, rounding_mode) - IEEE-compliant fused multiply-add
TVM_DLL const Op &ieee_fmaf();
// ieee_frcp(x, rounding_mode) - IEEE-compliant reciprocal
TVM_DLL const Op &ieee_frcp();
// ieee_fsqrt(x, rounding_mode) - IEEE-compliant square root
TVM_DLL const Op &ieee_fsqrt();
// ieee_frsqrt(x) - IEEE-compliant reciprocal square root (rn only)
TVM_DLL const Op &ieee_frsqrt();
// ieee_fdiv(x, y, rounding_mode) - IEEE-compliant division
TVM_DLL const Op &ieee_fdiv();

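// Example (illustrative sketch): each IEEE op takes the rounding mode as its
// trailing argument. Passing it as a string ("rn", mirroring CUDA's
// __fadd_rn-style suffixes) is an assumption for illustration:
//
//   PrimExpr sum = tvm::tir::Call(DataType::Float(32), tl::ieee_add(),
//                                 {a, b, tvm::tir::StringImm("rn")});
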
/*!
 * \brief tvm intrinsics for TMADescriptor creation for tiled load
 *
 * CuTensorMap* create_tma_descriptor(data_type, rank, global_addr,
 * global_shape..., global_stride..., smem_box..., smem_stride..., interleave,
 * swizzle, l2_promotion, oob_fill)
 *
 */
TVM_DLL const Op &create_tma_descriptor();

/*!
 * \brief tvm intrinsics for TMADescriptor creation for image to column load
 *
 * CuTensorMap* create_tma_im2col_descriptor(data_type, rank, global_addr,
 * global_shape..., global_stride..., elem_stride..., lower_corner...,
 * upper_corner..., smem_box_pixel, smem_box_channel, interleave, swizzle,
 * l2_promotion, oob_fill)
 *
 */
TVM_DLL const Op &create_tma_im2col_descriptor();

/*!
 * \brief Create a list of mbarriers, one for each num_threads argument
 *
 * create_list_of_mbarrier(num_threads0, num_threads1, ...)
 *
 */
TVM_DLL const Op &create_list_of_mbarrier();

/*!
 * \brief Get the mbarrier with barrier_id
 *
 * int64_t* GetMBarrier(barrier_id)
 *
 */
TVM_DLL const Op &get_mbarrier();

/*!
 * \brief tvm intrinsics for loading data from global tensor descriptor to
 * shared memory
 *
 * tma_load(descriptor, mbarrier, smem_data, coord_0, coord_1, ...)
 *
 */
TVM_DLL const Op &tma_load();

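// Example (illustrative sketch of how the load-path ops compose; all names
// are placeholders and the sequence is an assumption pieced together from the
// doc comments in this header, with mbarrier_expect_tx/mbarrier_wait_parity
// declared further below):
//
//   mbarrier_expect_tx(mbar, bytes);       // producer: announce tx byte count
//   tma_load(desc, mbar, smem_ptr, x, y);  // producer: issue async copy
//   mbarrier_wait_parity(mbar, parity);    // consumer: wait until data lands
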
/*!
 * \brief tvm intrinsics for loading image from global tensor to columns in
 * shared memory
 *
 * tma_load_im2col(descriptor, mbarrier, smem_data, coord_0, coord_1, ...,
 * image_offset, ...)
 *
 */
TVM_DLL const Op &tma_load_im2col();

/*!
 * \brief tvm intrinsics for storing data from shared memory to global tensor
 * descriptor
 *
 * tma_store(descriptor, smem_data, coord_0, coord_1, ...)
 *
 */
TVM_DLL const Op &tma_store();

/*!
 * \brief tvm intrinsics for barrier initialization fence
 *
 * Initializes a PTX fence-style barrier used to coordinate asynchronous
 * memory operations (for example, TMA/TMA_STORE).
 *
 * ptx_fence_barrier_init()
 *
 */
TVM_DLL const Op &ptx_fence_barrier_init();

/*!
 * \brief tvm intrinsics for mbarrier wait with parity bit
 *
 * mbarrier_wait_parity(mbarrier, parity)
 *
 */
TVM_DLL const Op &mbarrier_wait_parity();

/*!
 * \brief tvm intrinsics for mbarrier expect tx
 *
 * mbarrier_expect_tx(mbarrier, transaction_bytes)
 *
 */
TVM_DLL const Op &mbarrier_expect_tx();

/*!
 * \brief tvm intrinsics for ldmatrix
 *
 * ptx_ldmatrix(transposed, num, shared_addr, local_addr)
 *
 */
TVM_DLL const Op &ptx_ldmatrix();

/*!
 * \brief tvm intrinsics for stmatrix
 *
 * ptx_stmatrix(transposed, num, shared_addr, int32_values...)
 *
 */
TVM_DLL const Op &ptx_stmatrix();

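// Example (illustrative sketch): building a ptx_ldmatrix call in TIR. The
// argument order follows the doc comment above; the void result type and all
// operands are assumptions for illustration:
//
//   PrimExpr ld = tvm::tir::Call(DataType::Void(), tl::ptx_ldmatrix(),
//                                {/*transposed=*/0, /*num=*/4,
//                                 shared_addr, local_addr});
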
/*!
 * \brief tvm intrinsic for ptx async copy barrier using
 * cp.async.mbarrier.arrive.noinc
 *
 *  This op is used to represent a ptx async copy barrier operation in tilelang.
 */
TVM_DLL const Op &ptx_cp_async_barrier_noinc();

/*!
 * \brief Pack two b16 values into a b32 value
 *
 * int32 pack_b16(b16_value, b16_value)
 *
 */
TVM_DLL const Op &pack_b16();

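// Example (illustrative sketch): packing two fp16 lanes into one 32-bit
// value; `lo` and `hi` are placeholder float16 PrimExprs:
//
//   PrimExpr packed =
//       tvm::tir::Call(DataType::Int(32), tl::pack_b16(), {lo, hi});
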
/*!
 * \brief Issue a shared memory fence for async operations
 *
 * FenceProxyAsync()
 *
 */
TVM_DLL const Op &fence_proxy_async();

/*!
 * \brief Indicate arrival of warp issuing TMA_STORE
 *
 * tma_store_arrive()
 *
 */
TVM_DLL const Op &tma_store_arrive();

/*!
 * \brief Wait for TMA_STORE to finish
 *
 * tma_store_wait()
 *
 */
TVM_DLL const Op &tma_store_wait();

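// Example (illustrative sketch of the store path; names are placeholders and
// the ordering is an assumption based on the doc comments above):
//
//   fence_proxy_async();              // make smem writes visible to TMA
//   tma_store(desc, smem_ptr, x, y);  // issue the async store
//   tma_store_arrive();               // commit the issued store
//   tma_store_wait();                 // block until the store completes
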
/*!
 * \brief Set a register hint for warp-specialized branches
 *
 * SetMaxNRegInc(num_reg, is_inc)
 *
 */
TVM_DLL const Op &set_max_nreg();

/*!
 * \brief Do not set a register hint for warp-specialized branches
 *
 * no_set_max_nreg()
 *
 */
TVM_DLL const Op &no_set_max_nreg();

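// Example (illustrative sketch): the register hint is emitted as a void
// intrinsic call; the 240/is_inc values are placeholders following the
// SetMaxNRegInc(num_reg, is_inc) form documented above:
//
//   tvm::tir::Evaluate(tvm::tir::Call(
//       DataType::Void(), tl::set_max_nreg(), {240, /*is_inc=*/1}));
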
/*!
 * \brief Wait for the previous wgmma to finish
 *
 * wait_wgmma(num_mma)
 *
 */
TVM_DLL const Op &wait_wgmma();

/*!
 * \brief Synchronize all threads in a grid
 *
 * sync_grid()
 *
 */
TVM_DLL const Op &sync_grid();

/*!
 * \brief tvm intrinsic for loop break
 *
 * loop_break()
 *
 */
TVM_DLL const Op &loop_break();

/*!
 * \brief tvm intrinsic for amd matrix core mfma instructions.
 *
 *  void tvm_mfma(StringImm shape, StringImm A_layout, StringImm B_layout,
 *               StringImm A_dtype, StringImm B_dtype, StringImm C_dtype,
 *               Var multiplicand_a, Expr a_index,
 *               Var multiplicand_b, Expr b_index,
 *               Var accumulator, Expr c_index);
 */
TVM_DLL const Op &tvm_mfma();

/*!
 * \brief tvm intrinsic for storing the result of AMD MFMA into a destination
 * pointer.
 *
 *        There is no real instruction that does this, but we want to hide the
 * details of complex index manipulation behind this intrinsic to simplify TIR
 * lowering passes (e.g. LowerWarpMemory), as the CUDA PTX backend does.
 *
 * void tvm_mfma_store(IntImm m, IntImm n, Var dst_ptr, Var src_ptr, Expr
 * src_offset, Var dst_stride);
 */
TVM_DLL const Op &tvm_mfma_store();

/*!
 * \brief tvm intrinsic for amd rdna matrix core instructions.
 *
 *  void tvm_rdna_wmma(StringImm shape, StringImm A_layout, StringImm B_layout,
 *               StringImm A_dtype, StringImm B_dtype, StringImm C_dtype,
 *               Var multiplicand_a, Expr a_index,
 *               Var multiplicand_b, Expr b_index,
 *               Var accumulator, Expr c_index);
 */
TVM_DLL const Op &tvm_rdna_wmma();

/*!
 * \brief tvm intrinsic for storing the result of AMD RDNA WMMA into a
 * destination pointer.
 *
 *        There is no real instruction that does this, but we want to hide the
 * details of complex index manipulation behind this intrinsic to simplify TIR
 * lowering passes (e.g. LowerWarpMemory), as the CUDA PTX backend does.
 *
 * void tvm_rdna_wmma_store(IntImm m, IntImm n, Var dst_ptr, Var src_ptr, Expr
 * src_offset, Var dst_stride);
 */
TVM_DLL const Op &tvm_rdna_wmma_store();

/*!
 * \brief tilelang intrinsic for general matrix multiplication (GEMM).
 *
 *  This op is used to represent a generic GEMM operation in tilelang.
 */
TVM_DLL const Op &tl_gemm();

/*!
 * \brief tilelang intrinsic for sparse matrix multiplication (GEMM with
 * sparsity).
 *
 *  This op is used to represent a sparse GEMM operation in tilelang.
 */
TVM_DLL const Op &tl_gemm_sp();

/*!
 * \brief tilelang intrinsic for shuffle elect.
 *
 *  This op is used to represent a shuffle elect operation in tilelang.
 */
TVM_DLL const Op &tl_shuffle_elect();

} // namespace tl
} // namespace tvm

#endif // TVM_TL_OP_BUILTIN_H_