/*!
 * \file tl/op/builtin.h
 * \brief Builtin intrinsics.
 *
 */

#ifndef TVM_TL_OP_BUILTIN_H_
#define TVM_TL_OP_BUILTIN_H_

#include "op.h"
#include <tvm/ir/transform.h>

namespace tvm {
namespace tl {

/*!
 * \brief Whether to disable TMA lowering
 *
 * kDisableTMALower = "tl.disable_tma_lower"
 *
 */
static constexpr const char *kDisableTMALower = "tl.disable_tma_lower";

/*!
 * \brief Whether to disable warp specialization
 *
 * kDisableWarpSpecialized = "tl.disable_warp_specialized"
 *
 */
static constexpr const char *kDisableWarpSpecialized =
    "tl.disable_warp_specialized";

/*!
 * \brief The bitwidth used for index computations during lowering
 *
 * kConfigIndexBitwidth = "tl.config_index_bitwidth"
 *
 */
static constexpr const char *kConfigIndexBitwidth = "tl.config_index_bitwidth";

/*!
 * \brief Whether to disable dynamic tail split
 *
 * kDisableDynamicTailSplit = "tl.disable_dynamic_tail_split"
 *
 */
static constexpr const char *kDisableDynamicTailSplit =
    "tl.disable_dynamic_tail_split";

/*!
 * \brief The vectorization width of a buffer, specified by the user
 *
 * For example, if the vectorization width is 128 bits and buffer A[m, k] has
 * dtype float16, the extent of the vectorized dimension of A (i.e. k) must be
 * divisible by 8 (128 / 16 = 8).
 *
 * kDynamicAlignment = "tl.dynamic_alignment"
 *
 */
static constexpr const char *kDynamicAlignment = "tl.dynamic_alignment";
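
// Example (sketch, not part of this header): a lowering pass can read these
// configuration keys from the current PassContext using the standard
// tvm::transform::PassContext API; the variable names are illustrative only.
//
//   auto ctxt = tvm::transform::PassContext::Current();
//   bool disable_tma =
//       ctxt->GetConfig<Bool>(kDisableTMALower, Bool(false)).value();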

/*!
 * \brief tvm intrinsics for TMADescriptor creation for tiled load
 *
 * CuTensorMap* create_tma_descriptor(data_type, rank, global_addr,
 * global_shape..., global_stride..., smem_box..., smem_stride..., interleave,
 * swizzle, l2_promotion, oob_fill)
 *
 */
const Op &create_tma_descriptor();

/*!
 * \brief tvm intrinsics for TMADescriptor creation for image to column load
 *
 * CuTensorMap* create_tma_im2col_descriptor(data_type, rank, global_addr,
 * global_shape..., global_stride..., elem_stride..., lower_corner...,
 * upper_corner..., smem_box_pixel, smem_box_channel, interleave, swizzle,
 * l2_promotion, oob_fill)
 *
 */
const Op &create_tma_im2col_descriptor();

/*!
 * \brief Create a list of mbarrier with num_threads
 *
 * create_list_of_mbarrier(num_threads0, num_threads1, ...)
 *
 */
const Op &create_list_of_mbarrier();

/*!
 * \brief Get the mbarrier with barrier_id
 *
 * int64_t* GetMBarrier(barrier_id)
 *
 */
const Op &get_mbarrier();

/*!
 * \brief tvm intrinsics for loading data from global tensor descriptor to
 * shared memory
 *
 * tma_load(descriptor, mbarrier, smem_data, coord_0, coord_1, ...)
 *
 */
const Op &tma_load();
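
// Example (sketch, not part of this header): these accessors return the
// registered ops; lowering passes wrap them in tir::Call nodes, e.g.
//
//   tir::Call(DataType::Handle(), tma_load(),
//             {descriptor, mbarrier, smem_data, coord_0, coord_1});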

/*!
 * \brief tvm intrinsics for loading image from global tensor to columns in
 * shared memory
 *
 * tma_load_im2col(descriptor, mbarrier, smem_data, coord_0, coord_1, ...,
 * image_offset, ...)
 *
 */
const Op &tma_load_im2col();

/*!
 * \brief tvm intrinsics for storing data from shared memory to global tensor
 * descriptor
 *
 * tma_store(descriptor, smem_data, coord_0, coord_1, ...)
 *
 */
const Op &tma_store();

/*!
 * \brief tvm intrinsics for mbarrier wait with parity bit
 *
 * mbarrier_wait_parity(mbarrier, parity)
 *
 */
const Op &mbarrier_wait_parity();

/*!
 * \brief tvm intrinsics for mbarrier expect tx
 *
 * mbarrier_expect_tx(mbarrier, transaction_bytes)
 *
 */
const Op &mbarrier_expect_tx();
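
// Example (sketch, not part of this header): a typical producer/consumer
// handshake written as pseudo-TIR; the parity bit flips each iteration, and
// all names besides the intrinsics are illustrative.
//
//   // producer: announce the expected bytes, then issue the async copy
//   mbarrier_expect_tx(mbar, transaction_bytes);
//   tma_load(desc, mbar, smem_data, coord_0, coord_1);
//   // consumer: block until the copy for this phase has arrived
//   mbarrier_wait_parity(mbar, phase);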

/*!
 * \brief tvm intrinsics for ldmatrix
 *
 * ptx_ldmatirx(transposed, num, shared_addr, local_addr)
 *
 */
const Op &ptx_ldmatirx();

/*!
 * \brief tvm intrinsics for stmatrix
 *
 * ptx_stmatirx(transposed, num, shared_addr, int32_values...)
 *
 */
const Op &ptx_stmatirx();

/*!
 * \brief Pack two b16 values into a b32 value
 *
 * int32 pack_b16(b16_value, b16_value)
 *
 */
const Op &pack_b16();
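
// Example (sketch): the packing is plain bit concatenation; assuming the
// first operand lands in the low 16 bits (the exact lane order is fixed by
// the lowering, not by this declaration), the result is equivalent to
//
//   uint32_t packed = uint32_t(lo) | (uint32_t(hi) << 16);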

/*!
 * \brief Similar to __syncthreads(), but can be used to synchronize a subset
 * of the threads in a block
 *
 * sync_thread_partial(num_partial_threads or mbarrier)
 *
 */
const Op &sync_thread_partial();

/*!
 * \brief Issue a shared memory fence for async operations
 *
 * FenceProxyAsync()
 *
 */
const Op &fence_proxy_async();

/*!
 * \brief Indicate arrival of the warp issuing TMA_STORE
 *
 * tma_store_arrive()
 *
 */
const Op &tma_store_arrive();

/*!
 * \brief Wait for TMA_STORE to finish
 *
 * tma_store_wait()
 *
 */
const Op &tma_store_wait();
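
// Example (sketch, not part of this header): the usual async-store sequence
// in pseudo-TIR, mirroring the PTX cp.async.bulk commit/wait semantics.
//
//   tma_store(desc, smem_data, coord_0, coord_1); // enqueue the async store
//   tma_store_arrive();                           // commit the store group
//   tma_store_wait();                             // block until it completes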

/*!
 * \brief Set the register hint for warp-specialized branches
 *
 * SetMaxNRegInc(num_reg, is_inc)
 *
 */
const Op &set_max_nreg();

/*!
 * \brief Disable the register hint for warp-specialized branches
 *
 * no_set_max_nreg()
 *
 */
const Op &no_set_max_nreg();

/*!
 * \brief Wait for the previous wgmma to finish
 *
 * wait_wgmma(num_mma)
 *
 */
const Op &wait_wgmma();
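
// Example (sketch): by analogy with PTX wgmma.wait_group, num_mma is the
// number of wgmma groups still allowed to be in flight when the wait returns.
//
//   wait_wgmma(0); // block until every issued wgmma group has completed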

/*!
 * \brief tvm intrinsic for amd matrix core mfma instructions.
 *
 *  void tvm_mfma(StringImm shape, StringImm A_layout, StringImm B_layout,
 *               StringImm A_dtype, StringImm B_dtype, StringImm C_dtype,
 *               Var multiplicand_a, Expr a_index,
 *               Var multiplicand_b, Expr b_index,
 *               Var accumulator, Expr c_index);
 */
TVM_DLL const Op &tvm_mfma();

/*!
 * \brief tvm intrinsic for storing the result of AMD MFMA into a destination
 * pointer.
 *
 *        There is no real instruction that does this, but we want to hide the
 * details of complex index manipulation behind this intrinsic to simplify TIR
 * lowering passes (e.g. LowerWarpMemory), as the CUDA PTX backend does.
 *
 * void tvm_mfma_store(IntImm m, IntImm n, Var dst_ptr, Var src_ptr, Expr
 * src_offset, Var dst_stride);
 */
TVM_DLL const Op &tvm_mfma_store();

/*!
 * \brief tvm intrinsic for amd rdna matrix core instructions.
 *
 *  void tvm_rdna_wmma(StringImm shape, StringImm A_layout, StringImm B_layout,
 *               StringImm A_dtype, StringImm B_dtype, StringImm C_dtype,
 *               Var multiplicand_a, Expr a_index,
 *               Var multiplicand_b, Expr b_index,
 *               Var accumulator, Expr c_index);
 */
TVM_DLL const Op &tvm_rdna_wmma();

/*!
 * \brief tvm intrinsic for storing the result of AMD RDNA WMMA into a
 * destination pointer.
 *
 *        There is no real instruction that does this, but we want to hide the
 * details of complex index manipulation behind this intrinsic to simplify TIR
 * lowering passes (e.g. LowerWarpMemory), as the CUDA PTX backend does.
 *
 * void tvm_rdna_wmma_store(IntImm m, IntImm n, Var dst_ptr, Var src_ptr, Expr
 * src_offset, Var dst_stride);
 */
TVM_DLL const Op &tvm_rdna_wmma_store();

} // namespace tl
} // namespace tvm

#endif //  TVM_TL_OP_BUILTIN_H_