#ifndef CK_AMD_BUFFER_ADDRESSING_HPP
#define CK_AMD_BUFFER_ADDRESSING_HPP

#include "float_type.hpp"

namespace ck {

// 128-bit buffer resource descriptor, held in four consecutive SGPRs and consumed
// by the buffer_load and buffer_store instructions
// https://rocm-documentation.readthedocs.io/en/latest/GCN_ISA_Manuals/testdocbook.html#vector-memory-buffer-instructions
template <typename T>
union BufferLoadStoreDwordConfig
{
    int32x4_t data;
    T* address[2];
    int32_t range[4];
};
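
// A minimal sketch (illustrative only, assuming a 64-bit pointer; p_buffer is a
// hypothetical name) of how the descriptor is assembled; the helpers below all
// follow this exact pattern:
//
//     BufferLoadStoreDwordConfig<float> cfg;
//     cfg.address[0] = p_buffer;   // words 0-1: 64-bit base address
//     cfg.range[2]   = -1;         // word 2: num_records, -1 = maximum range
//     cfg.range[3]   = 0x00027000; // word 3: descriptor flags (format bits)
//     // cfg.data now holds the four dwords the instruction expects in SGPRs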

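// Thin wrappers over the llvm.amdgcn.buffer.* intrinsics. glc/slc select the
// globally-coherent / system-level-coherent cache policy bits of the instruction.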
__device__ float __llvm_amdgcn_buffer_load(int32x4_t rsrc,
                                           index_t vindex,
                                           index_t offset,
                                           bool glc,
                                           bool slc) __asm("llvm.amdgcn.buffer.load.f32");

__device__ float2_t __llvm_amdgcn_buffer_loadx2(int32x4_t rsrc,
                                                index_t vindex,
                                                index_t offset,
                                                bool glc,
                                                bool slc) __asm("llvm.amdgcn.buffer.load.v2f32");

__device__ float4_t __llvm_amdgcn_buffer_loadx4(int32x4_t rsrc,
                                                index_t vindex,
                                                index_t offset,
                                                bool glc,
                                                bool slc) __asm("llvm.amdgcn.buffer.load.v4f32");

__device__ void __llvm_amdgcn_buffer_store(float vdata,
                                           int32x4_t rsrc,
                                           index_t vindex,
                                           index_t offset,
                                           bool glc,
                                           bool slc) __asm("llvm.amdgcn.buffer.store.f32");

__device__ void __llvm_amdgcn_buffer_storex2(float2_t vdata,
                                             int32x4_t rsrc,
                                             index_t vindex,
                                             index_t offset,
                                             bool glc,
                                             bool slc) __asm("llvm.amdgcn.buffer.store.v2f32");

__device__ void __llvm_amdgcn_buffer_storex4(float4_t vdata,
                                             int32x4_t rsrc,
                                             index_t vindex,
                                             index_t offset,
                                             bool glc,
                                             bool slc) __asm("llvm.amdgcn.buffer.store.v4f32");

__device__ void
__llvm_amdgcn_buffer_atomic_add(float vdata,
                                int32x4_t rsrc,
                                index_t vindex,
                                index_t offset,
                                bool slc) __asm("llvm.amdgcn.buffer.atomic.fadd.f32");
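
// Note: the buffer atomic fadd is not available on all targets (it arrived with
// gfx908-class hardware), so call sites should guard on the build target.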

// buffer_load requires:
//   1) p_src must point into global memory; the loaded result is returned in VGPRs
//   2) p_src must be a block-invariant pointer (uniform across the workgroup, so
//      the descriptor can live in SGPRs).
// It is the user's responsibility to make sure this holds.
template <typename T, index_t VectorSize>
__device__ typename vector_type<T, VectorSize>::MemoryType amd_intrinsic_buffer_load(
    const T* p_src_block, index_t src_thread_data_offset, index_t src_const_data_offset);
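
// Hypothetical usage sketch (p_global and thread_id are illustrative names, not
// part of this header): each thread reads 4 floats at its own offset from a
// block-invariant base pointer.
//
//     const float* p_global = ...;            // block-invariant global pointer
//     index_t offset        = thread_id * 4;  // per-thread element offset
//     float4_t v = amd_intrinsic_buffer_load<float, 4>(p_global, offset, 0);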

// buffer_store requires:
//   1) src must be held in VGPRs; p_dst must point into global memory
//   2) p_dst must be a block-invariant pointer (uniform across the workgroup, so
//      the descriptor can live in SGPRs).
// It is the user's responsibility to make sure this holds.
template <typename T, index_t VectorSize>
__device__ void
amd_intrinsic_buffer_store(const typename vector_type<T, VectorSize>::MemoryType& src,
                           T* p_dst_block,
                           index_t dst_thread_data_offset,
                           index_t dst_const_data_offset);
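
// Hypothetical usage sketch (illustrative names), mirroring the load above:
//
//     float4_t v = ...;   // value held in VGPRs
//     amd_intrinsic_buffer_store<float, 4>(v, p_dst, offset, 0);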

template <typename T, index_t VectorSize>
__device__ void
amd_intrinsic_buffer_atomic_add(const typename vector_type<T, VectorSize>::MemoryType& src,
                                T* p_dst_block,
                                index_t dst_thread_data_offset,
                                index_t dst_const_data_offset);
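
// Only the intrinsic path is implemented for the atomic add; the inline-asm
// fallback below deliberately fails to compile (see the static_assert).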

template <>
__device__ float amd_intrinsic_buffer_load<float, 1>(const float* p_src_block,
                                                     index_t src_thread_data_offset,
                                                     index_t src_const_data_offset)
{
    float dst;

    index_t src_thread_addr_offset = src_thread_data_offset * sizeof(float);
    index_t src_const_addr_offset  = src_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> src_block_config;

    // words 0-1: 64-bit base address of the buffer
    src_block_config.address[0] = const_cast<float*>(p_src_block);
    // word 2: num_records; -1 means the maximum addressable range
    src_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    src_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    dst = __llvm_amdgcn_buffer_load(
        src_block_config.data, 0, src_thread_addr_offset + src_const_addr_offset, false, false);
#else
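    // Fallback: emit buffer_load_dword directly.
    // Operand mapping: %0 = destination VGPR, %1 = per-thread voffset (VGPR),
    // %2 = 128-bit resource descriptor (SGPRs), %3 = scalar soffset (SGPR).
    // s_waitcnt 0 blocks until the load has completed.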
    asm volatile(
        "\n \
    buffer_load_dword %0, %1, %2, %3 offen offset:0 \n \
    s_waitcnt 0 \n \
    "
        : "=v"(dst)
        : "v"(src_thread_addr_offset), "s"(src_block_config.data), "s"(src_const_addr_offset));
#endif

    return dst;
}

template <>
__device__ float2_t amd_intrinsic_buffer_load<float, 2>(const float* p_src_block,
                                                        index_t src_thread_data_offset,
                                                        index_t src_const_data_offset)
{
    float2_t dst;

    index_t src_thread_addr_offset = src_thread_data_offset * sizeof(float);
    index_t src_const_addr_offset  = src_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> src_block_config;

    // words 0-1: 64-bit base address of the buffer
    src_block_config.address[0] = const_cast<float*>(p_src_block);
    // word 2: num_records; -1 means the maximum addressable range
    src_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    src_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    dst = __llvm_amdgcn_buffer_loadx2(
        src_block_config.data, 0, src_thread_addr_offset + src_const_addr_offset, false, false);
#else
    asm volatile(
        "\n \
    buffer_load_dwordx2 %0, %1, %2, %3 offen offset:0 \n \
    s_waitcnt 0 \n \
    "
        : "=v"(dst)
        : "v"(src_thread_addr_offset), "s"(src_block_config.data), "s"(src_const_addr_offset));
#endif

    return dst;
}

template <>
__device__ float4_t amd_intrinsic_buffer_load<float, 4>(const float* p_src_block,
                                                        index_t src_thread_data_offset,
                                                        index_t src_const_data_offset)
{
    float4_t dst;

    index_t src_thread_addr_offset = src_thread_data_offset * sizeof(float);
    index_t src_const_addr_offset  = src_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> src_block_config;

    // words 0-1: 64-bit base address of the buffer
    src_block_config.address[0] = const_cast<float*>(p_src_block);
    // word 2: num_records; -1 means the maximum addressable range
    src_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    src_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    dst = __llvm_amdgcn_buffer_loadx4(
        src_block_config.data, 0, src_thread_addr_offset + src_const_addr_offset, false, false);
#else
    asm volatile(
        "\n \
    buffer_load_dwordx4 %0, %1, %2, %3 offen offset:0 \n \
    s_waitcnt 0 \n \
    "
        : "=v"(dst)
        : "v"(src_thread_addr_offset), "s"(src_block_config.data), "s"(src_const_addr_offset));
#endif

    return dst;
}

template <>
__device__ void amd_intrinsic_buffer_store<float, 1>(const float& src,
                                                     float* p_dst_block,
                                                     index_t dst_thread_data_offset,
                                                     index_t dst_const_data_offset)
{
    index_t dst_thread_addr_offset = dst_thread_data_offset * sizeof(float);
    index_t dst_const_addr_offset  = dst_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> dst_block_config;

    // words 0-1: 64-bit base address of the buffer
    dst_block_config.address[0] = p_dst_block;
    // word 2: num_records; -1 means the maximum addressable range
    dst_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    dst_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    __llvm_amdgcn_buffer_store(src,
                               dst_block_config.data,
                               0,
                               dst_thread_addr_offset + dst_const_addr_offset,
                               false,
                               false);
#else
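    // Fallback: emit buffer_store_dword directly.
    // Operand mapping: %0 = resource descriptor (SGPRs), %1 = source data (VGPR),
    // %2 = per-thread voffset (VGPR), %3 = scalar soffset (SGPR).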
    asm volatile("\n \
    buffer_store_dword %1, %2, %0, %3 offen offset:0 \n \
    "
                 :
                 : "s"(dst_block_config.data),
                   "v"(src),
                   "v"(dst_thread_addr_offset),
                   "s"(dst_const_addr_offset));
#endif
}

template <>
__device__ void amd_intrinsic_buffer_store<float, 2>(const float2_t& src,
                                                     float* p_dst_block,
                                                     index_t dst_thread_data_offset,
                                                     index_t dst_const_data_offset)
{
    index_t dst_thread_addr_offset = dst_thread_data_offset * sizeof(float);
    index_t dst_const_addr_offset  = dst_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> dst_block_config;

    // words 0-1: 64-bit base address of the buffer
    dst_block_config.address[0] = p_dst_block;
    // word 2: num_records; -1 means the maximum addressable range
    dst_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    dst_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    __llvm_amdgcn_buffer_storex2(src,
                                 dst_block_config.data,
                                 0,
                                 dst_thread_addr_offset + dst_const_addr_offset,
                                 false,
                                 false);
#else
    asm volatile("\n \
    buffer_store_dwordx2 %1, %2, %0, %3 offen offset:0 \n \
    "
                 :
                 : "s"(dst_block_config.data),
                   "v"(src),
                   "v"(dst_thread_addr_offset),
                   "s"(dst_const_addr_offset));
#endif
}

template <>
__device__ void amd_intrinsic_buffer_store<float, 4>(const float4_t& src,
                                                     float* p_dst_block,
                                                     index_t dst_thread_data_offset,
                                                     index_t dst_const_data_offset)
{
    index_t dst_thread_addr_offset = dst_thread_data_offset * sizeof(float);
    index_t dst_const_addr_offset  = dst_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> dst_block_config;

    // words 0-1: 64-bit base address of the buffer
    dst_block_config.address[0] = p_dst_block;
    // word 2: num_records; -1 means the maximum addressable range
    dst_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    dst_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    __llvm_amdgcn_buffer_storex4(src,
                                 dst_block_config.data,
                                 0,
                                 dst_thread_addr_offset + dst_const_addr_offset,
                                 false,
                                 false);
#else
    asm volatile("\n \
    buffer_store_dwordx4 %1, %2, %0, %3 offen offset:0 \n \
    "
                 :
                 : "s"(dst_block_config.data),
                   "v"(src),
                   "v"(dst_thread_addr_offset),
                   "s"(dst_const_addr_offset));
#endif
}

template <>
__device__ void amd_intrinsic_buffer_atomic_add<float, 1>(const float& src,
                                                          float* p_dst_block,
                                                          index_t dst_thread_data_offset,
                                                          index_t dst_const_data_offset)
{
    index_t dst_thread_addr_offset = dst_thread_data_offset * sizeof(float);
    index_t dst_const_addr_offset  = dst_const_data_offset * sizeof(float);

    BufferLoadStoreDwordConfig<float> dst_block_config;

    // words 0-1: 64-bit base address of the buffer
    dst_block_config.address[0] = p_dst_block;
    // word 2: num_records; -1 means the maximum addressable range
    dst_block_config.range[2] = -1;
    // word 3: descriptor flags (data format etc.)
    dst_block_config.range[3] = 0x00027000;

#if CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC
    __llvm_amdgcn_buffer_atomic_add(
        src, dst_block_config.data, 0, dst_thread_addr_offset + dst_const_addr_offset, false);
#else
    static_assert(false, "wrong! amd_intrinsic_buffer_atomic_add has no inline-asm implementation");
#endif
}

} // namespace ck
#endif