gaoqiong / composable_kernel_ROCM · Commits

Commit 854cd8b4
authored Feb 18, 2025 by mtgu0705

    commit missing files

Parent: 182e7480

Showing 7 changed files with 900 additions and 19 deletions
example/65_gemm_multiply_multiply/moe_gemm2.cpp                                                   +2    -2
example/65_gemm_multiply_multiply/moe_pk_i4_gemm1.cpp                                             +1    -1
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_selector.hpp    +23   -3
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v1.hpp          +7    -7
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v2.hpp          +1    -1
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v3.hpp          +860  -0   (new file)
include/ck/tensor_operation/gpu/device/impl/device_moe_gemm.hpp                                   +6    -5
example/65_gemm_multiply_multiply/moe_gemm2.cpp

@@ -268,8 +268,8 @@ int main(int argc, char* argv[])
     expert_ids.savetxt("expert_ids.txt", "int");
     sorted_token_ids.savetxt("sorted_token_ids.txt", "int");
     Tensor<A0DataType> a0_t_k_k(HostTensorDescriptor({tokens, topk, K}, {topk * K, K, 1}));
-    Tensor<B0DataType> b0_e_n_k(HostTensorDescriptor({experts, K, N}, {N * K, 1, N}));
+    Tensor<B0DataType> b0_e_n_k(HostTensorDescriptor({experts, K, N}, {N * K, 1, K}));
-    Tensor<B0DataType> b0_preshuffled(HostTensorDescriptor({experts, K, N}, {N * K, 1, N}));
+    Tensor<B0DataType> b0_preshuffled(HostTensorDescriptor({experts, K, N}, {N * K, 1, K}));
     Tensor<D0DataType> d0_t_n(HostTensorDescriptor({tokens, N}, {StrideDs[0], 0}));
     Tensor<D1DataType> d1_e_n(HostTensorDescriptor({experts, N}, {1, StrideDs[1]}));
     Tensor<D2DataType> d2_e_n(HostTensorDescriptor({sorted_size, N}, {1, 0}));
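How the stride change reads (my interpretation; the sketch below is an illustration, not code from the repository): HostTensorDescriptor pairs lengths with strides and addresses element (e, k, n) at e*S0 + k*S1 + n*S2. The new strides {N * K, 1, K} give e*N*K + n*K + k, i.e. a packed [experts][N][K] layout with K contiguous, whereas the old n-stride of N only lines up into a consistent packed layout when K == N. The toy program below, with made-up sizes, prints the offset of one element under both stride sets.

// Hedged sketch: reproduces only the length/stride offset arithmetic with
// hypothetical sizes; names mirror the example but nothing here is CK code.
#include <array>
#include <cstdio>

int main()
{
    const long K = 8, N = 4;
    const std::array<long, 3> old_strides{N * K, 1, N}; // before this commit
    const std::array<long, 3> new_strides{N * K, 1, K}; // after this commit

    auto offset = [](std::array<long, 3> idx, std::array<long, 3> s) {
        return idx[0] * s[0] + idx[1] * s[1] + idx[2] * s[2];
    };

    const std::array<long, 3> idx{1, 3, 2}; // (expert, k, n)
    std::printf("old offset = %ld, new offset = %ld\n",
                offset(idx, old_strides), offset(idx, new_strides));
    return 0;
}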
example/65_gemm_multiply_multiply/moe_pk_i4_gemm1.cpp

@@ -36,7 +36,7 @@ using Col = ck::tensor_layout::gemm::ColumnMajor;
 using A0DataType       = F8;
 using B0DataType       = I4;
-using EDataType        = F32;
+using EDataType        = F16;
 using AccDataType      = F32;
 using CShuffleDataType = F32;
 using D0DataType       = F32;
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_selector.hpp

@@ -6,6 +6,7 @@
 #include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v1.hpp"
 #include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_dequant_v1.hpp"
 #include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v2.hpp"
+#include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v3.hpp"
 #include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_dequant_v3.hpp"

 namespace ck {

@@ -35,7 +36,7 @@ constexpr auto BlockGemmBPreshufflePipeline_Selector()
 {
     if constexpr(BlkGemmPipelineVer == BlockGemmPipelineVersion::v1)
     {
-        if(std::is_same<ADataType, BDataType>::value)
+        if constexpr(std::is_same<ADataType, BDataType>::value)
         {
             return BlockwiseGemmXdlops_pipeline_bpreshuffle_v1<BlkGemmPipeSche,
                                                                BlockSize,

@@ -109,9 +110,28 @@ constexpr auto BlockGemmBPreshufflePipeline_Selector()
     else if constexpr(BlkGemmPipelineVer == BlockGemmPipelineVersion::v3)
     {
         static_assert(MRepeat >= 4, "MRepeat should at least be 4 in BlockGemmPipelineVersion::v3");
-        if(std::is_same<ADataType, BDataType>::value)
+        if constexpr(std::is_same<ADataType, BDataType>::value)
         {
-            std::cerr << "BlockGemmPipeline v3 configuration is not available" << std::endl;
+            return BlockwiseGemmXdlops_pipeline_bpreshuffle_v3<BlkGemmPipeSche,
+                                                               BlockSize,
+                                                               ADataType,
+                                                               BDataType,
+                                                               ComputeDataType,
+                                                               AccDataType,
+                                                               ATileDesc,
+                                                               BTileDesc,
+                                                               AMmaTileDesc,
+                                                               BMmaTileDesc,
+                                                               ABlockTransferSrcScalarPerVector,
+                                                               BBlockTransferSrcScalarPerVector,
+                                                               MPerBlock,
+                                                               NPerBlock,
+                                                               KPerBlock,
+                                                               MPerXDL,
+                                                               NPerXDL,
+                                                               MRepeat,
+                                                               NRepeat,
+                                                               KPack>{};
         }
         else
         {
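One way to read the `if` → `if constexpr` change (a general C++ sketch, not taken from this repository): with a plain `if`, both branches of the selector are instantiated even when the type test is false, so a branch that returns a different pipeline type (or, as in the old v3 branch, returned nothing and only printed to std::cerr) still has to type-check against the function's deduced return type. `if constexpr` discards the untaken branch at compile time, which is what lets each branch return a different class template instance.

// Minimal sketch of compile-time branch selection with if constexpr.
// PipelineA/PipelineB stand in for the different blockwise GEMM pipeline types.
#include <type_traits>

struct PipelineA { static constexpr int version = 1; };
struct PipelineB { static constexpr int version = 3; };

template <typename ADataType, typename BDataType>
constexpr auto select_pipeline()
{
    if constexpr(std::is_same<ADataType, BDataType>::value)
    {
        return PipelineA{}; // only instantiated when the types match
    }
    else
    {
        return PipelineB{}; // only instantiated when they differ
    }
}

static_assert(decltype(select_pipeline<float, float>())::version == 1);
static_assert(decltype(select_pipeline<float, int>())::version == 3);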
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v1.hpp

@@ -144,7 +144,7 @@ struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v1<BlockGemmPipelineScheduler::I
     static constexpr index_t PrefetchStages  = 2;
     static constexpr index_t PrefillStages   = 1;
-    static constexpr index_t GlobalBufferNum = 1;
+    static constexpr index_t GlobalBufferNum = 2;

     template <typename TileDesc_M0_M1_M2_K>
     __host__ __device__ static constexpr auto MakeAGemmMmaTileDescriptor(const TileDesc_M0_M1_M2_K&)

@@ -249,7 +249,7 @@ struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v1<BlockGemmPipelineScheduler::I
     constexpr auto b_block_origin_idx = make_tuple(I0, I0, I0, I0);

     // Global prefetch A1 B1
-    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
+    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf, I0);
     b_blockwise_copy.Run(b_grid_desc,
                          b_grid_buf,
                          b_block_desc_n0_n1_k0_k1,

@@ -258,12 +258,13 @@ struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v1<BlockGemmPipelineScheduler::I
     a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
     b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
+    __builtin_amdgcn_sched_barrier(0);

     // // Local prefill A1
-    a_blockwise_copy.RunWrite(a_block_desc, a_block_buf);
+    a_blockwise_copy.RunWrite(a_block_desc, a_block_buf, I0);

     // // Global prefetch A2
-    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
+    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf, I0);
     a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);

     // Local prefetch A1

@@ -296,13 +297,12 @@ struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v1<BlockGemmPipelineScheduler::I
                          b_block_desc_n0_n1_k0_k1,
                          b_block_origin_idx,
                          b_thread_bufs(local_read_buf));
     b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);

     block_sync_lds();
-    a_blockwise_copy.RunWrite(a_block_desc, a_block_buf);
+    a_blockwise_copy.RunWrite(a_block_desc, a_block_buf, mfma_reg_buf);
-    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
+    a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf, local_read_buf);
     a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
     // printf("bid %d tid %d %f %f\n", blockIdx.x, threadIdx.x,
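Reading the v1 changes together (my interpretation, hedged): GlobalBufferNum goes from 1 to 2 and RunRead/RunWrite gain a buffer-index argument (I0, mfma_reg_buf, local_read_buf), which looks like a switch to ping-pong (double-buffered) A transfers so that the read refilling one slot can overlap the consumption of the other. The sketch below shows only the generic two-slot rotation pattern; the names and load/compute functions are placeholders, not the CK blockwise-copy API.

// Hedged sketch of a two-slot (ping-pong) buffer rotation.
#include <array>
#include <cstdio>

int main()
{
    std::array<int, 2> prefetch_buf{}; // stands in for GlobalBufferNum = 2 slots

    auto load = [&](int slot, int iter) { prefetch_buf[slot] = iter; };
    auto use  = [&](int slot) {
        std::printf("compute on slot %d holding iter %d\n", slot, prefetch_buf[slot]);
    };

    const int num_loop = 6;
    load(0, 0); // prefill slot 0 before the loop (like RunRead(..., I0))
    for(int i = 1; i < num_loop; ++i)
    {
        const int mfma_reg_buf   = (i - 1) % 2; // slot being consumed
        const int local_read_buf = i % 2;       // slot being refilled
        load(local_read_buf, i);
        use(mfma_reg_buf);
    }
    use((num_loop - 1) % 2); // drain the last prefetched slot
    return 0;
}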
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v2.hpp

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
include/ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_b_preshuffle_v3.hpp  (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/tensor_operation/gpu/block/blockwise_gemm_pipeline_xdlops_base.hpp"

namespace ck {

// Compute optimized pipeline
// GlobalPrefetchStages: 2
// LocalPreFillStages: 1
// LocalPreFetchStages: 1
// LocalSharedMemoryBuffer: 1

template <BlockGemmPipelineScheduler BlkGemmPipelineVer,
          index_t BlockSize,
          typename ADataType,
          typename BDataType,
          typename ComputeDataType,
          typename AccDataType,
          typename ATileDesc,
          typename BTileDesc,
          typename AMmaTileDesc,
          typename BMmaTileDesc,
          index_t ABlockTransferSrcScalarPerVector,
          index_t BBlockTransferSrcScalarPerVector,
          index_t MPerBlock,
          index_t NPerBlock,
          index_t KPerBlock,
          index_t MPerXDL,
          index_t NPerXDL,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPacks>
struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v3
{
};
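Aside (not part of the header above): the pipeline parameters noted at the top of this file (GlobalPrefetchStages: 2, LocalPreFillStages: 1, LocalPreFetchStages: 1, LocalSharedMemoryBuffer: 1) read as a two-stage software pipeline in which iteration i's math overlaps iteration i+1's global load. A generic shape of that schedule, with placeholder load/compute steps rather than the real CK transfer objects, is sketched below.

// Hedged sketch of a 2-stage software-pipelined loop skeleton.
// load_tile/compute_tile are placeholders for the blockwise copy and MFMA work.
#include <cstdio>

void load_tile(int i)    { std::printf("load tile %d\n", i); }
void compute_tile(int i) { std::printf("compute tile %d\n", i); }

void run(int num_loop)
{
    load_tile(0);                  // global prefetch of the first tile (prologue)
    for(int i = 0; i < num_loop - 1; ++i)
    {
        load_tile(i + 1);          // prefetch the next tile while...
        compute_tile(i);           // ...consuming the tile fetched previously
    }
    compute_tile(num_loop - 1);    // epilogue: last tile has nothing left to prefetch
}

int main() { run(4); return 0; }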
template <index_t BlockSize,
          typename ADataType,
          typename BDataType,
          typename ComputeDataType,
          typename AccDataType,
          typename ATileDesc,
          typename BTileDesc,
          typename AMmaTileDesc,
          typename BMmaTileDesc,
          index_t ABlockTransferSrcScalarPerVector,
          index_t BBlockTransferSrcScalarPerVector,
          index_t MPerBlock,
          index_t NPerBlock,
          index_t KPerBlock,
          index_t MPerXDL,
          index_t NPerXDL,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPack
          // ,bool TransposeC //disable transposec right now...
          >
struct BlockwiseGemmXdlops_pipeline_bpreshuffle_v3<
    BlockGemmPipelineScheduler::Intrawave, BlockSize, ADataType, BDataType, ComputeDataType,
    AccDataType, ATileDesc, BTileDesc, AMmaTileDesc, BMmaTileDesc,
    ABlockTransferSrcScalarPerVector, BBlockTransferSrcScalarPerVector, MPerBlock, NPerBlock,
    KPerBlock, MPerXDL, NPerXDL, MRepeat, NRepeat, KPack>
    : BlockwiseGemmXdlops_pipeline_base<BlockSize, ADataType, BDataType, ComputeDataType,
                                        AccDataType, ATileDesc, BTileDesc, AMmaTileDesc,
                                        BMmaTileDesc, ABlockTransferSrcScalarPerVector,
                                        BBlockTransferSrcScalarPerVector, MPerBlock, NPerBlock,
                                        KPerBlock, MPerXDL, NPerXDL, MRepeat, NRepeat, KPack>
{
    using Base = BlockwiseGemmXdlops_pipeline_base<BlockSize, ADataType, BDataType,
                                                   ComputeDataType, AccDataType, ATileDesc,
                                                   BTileDesc, AMmaTileDesc, BMmaTileDesc,
                                                   ABlockTransferSrcScalarPerVector,
                                                   BBlockTransferSrcScalarPerVector, MPerBlock,
                                                   NPerBlock, KPerBlock, MPerXDL, NPerXDL,
                                                   MRepeat, NRepeat, KPack>;
    using Base::A_K1;
    using Base::B_K1;
    using Base::I0;
    using Base::I1;
    using Base::I2;
    using Base::KRepeat;
    using Base::xdlops_gemm;
    using typename Base::HotLoopInstList;

    using Base::a_block_desc_m0_m1_m2_k;
    using Base::CalculateCThreadOriginDataIndex;
    using Base::CalculateCThreadOriginDataIndex8D;
    using Base::GetCBlockDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2;
    using Base::GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2;
    using Base::GetCBlockDescriptor_M0_N0_M1_N1_M2_N2_N3_N4;
    using Base::GetCThreadBuffer;
    using Base::GetCThreadDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2;
    using Base::GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2;
    using Base::GetCThreadDescriptor_M0_N0_M1_N1_M2_N2_N3_N4;
    using Base::MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2;
    using Base::MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2;

    using Base::AMmaKStride;
    using Base::BMmaKStride;
    using Base::MWaves;

    static constexpr index_t PrefetchStages  = 2;
    static constexpr index_t PrefillStages   = 1;
    static constexpr index_t GlobalBufferNum = 1;

    static constexpr index_t HotloopLocalBufSwitch = MRepeat % 2 == 0 ? 0 : 1;
    template <typename TileDesc_M0_M1_M2_K>
    __host__ __device__ static constexpr auto MakeAGemmMmaTileDescriptor(const TileDesc_M0_M1_M2_K&)
    {
        constexpr index_t M0 = TileDesc_M0_M1_M2_K{}.GetLength(Number<0>{});
        constexpr index_t M1 = TileDesc_M0_M1_M2_K{}.GetLength(Number<1>{});
        constexpr index_t M2 = TileDesc_M0_M1_M2_K{}.GetLength(Number<2>{});
        constexpr index_t K2 = KPack;
        constexpr index_t K1 = 64 / NPerXDL;
        constexpr index_t K0 = KRepeat;

        return transform_tensor_descriptor(
            TileDesc_M0_M1_M2_K{},
            make_tuple(make_pass_through_transform(Number<M0>{}),
                       make_pass_through_transform(Number<M1>{}),
                       make_pass_through_transform(Number<M2>{}),
                       make_unmerge_transform(
                           make_tuple(Number<K0>{}, Number<K1>{}, Number<K2>{}))),
            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3>{}),
            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}, Sequence<3, 4, 5>{}));
    }

    static constexpr auto a_block_desc_m0_m1_m2_k0_k1_k2 =
        MakeAGemmMmaTileDescriptor(a_block_desc_m0_m1_m2_k);

    __host__ __device__ static constexpr bool BlockHasHotloop(index_t num_loop)
    {
        return num_loop > PrefetchStages;
    }

    __host__ __device__ static constexpr TailNumber BlockLoopTailNum(index_t num_loop)
    {
        return num_loop % 2 == 0 ? TailNumber::Even : TailNumber::Odd;
    }
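Aside (not part of the header): a small host-side illustration of how these two helpers classify a loop count, assuming num_loop is the number of KPerBlock tiles as elsewhere in CK's pipelines. With PrefetchStages = 2, any num_loop above 2 has a hot loop, and the tail flavour is simply the parity of num_loop.

// Hedged sketch mirroring the two constexpr helpers above for a few counts.
#include <cstdio>

constexpr int  PrefetchStages = 2;
constexpr bool BlockHasHotloop(int num_loop) { return num_loop > PrefetchStages; }
constexpr const char* BlockLoopTailNum(int num_loop)
{
    return num_loop % 2 == 0 ? "TailNumber::Even" : "TailNumber::Odd";
}

int main()
{
    for(int num_loop : {2, 3, 8, 9})
        std::printf("num_loop=%d  hot_loop=%d  tail=%s\n",
                    num_loop, BlockHasHotloop(num_loop), BlockLoopTailNum(num_loop));
    return 0;
}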
    template <typename Stage>
    __device__ static constexpr auto HotLoopScheduler(Stage stage)
    {
        constexpr auto num_ds_read_inst_a     = HotLoopInstList::A_LDS_Read_Inst_Num;
        constexpr auto num_ds_write_inst_a    = HotLoopInstList::A_LDS_Write_Inst_Num;
        constexpr auto num_buffer_load_inst_a = HotLoopInstList::A_Buffer_Load_Inst_Num;
        constexpr auto num_buffer_load_inst_b = MWaves * HotLoopInstList::B_Buffer_Load_Inst_Num;
        constexpr auto num_mfma               = HotLoopInstList::C_MFMA_Inst_Num;

        constexpr auto staged_num_ds_read_inst_a     = num_ds_read_inst_a / MRepeat;
        constexpr auto staged_num_mfma               = num_mfma / MRepeat;
        constexpr auto staged_num_mfma_per_ds_read_a = staged_num_mfma / staged_num_ds_read_inst_a;

        if constexpr(stage.value == 0)
        {
            constexpr auto staged_num_buffer_load_b_per_ds_read_a =
                num_buffer_load_inst_b / staged_num_ds_read_inst_a;
            constexpr auto staged_num_mfma_per_buffer_load_b =
                staged_num_mfma / num_buffer_load_inst_b;
            // B global
            static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
                ignore = i_inst;
                static_for<0, staged_num_buffer_load_b_per_ds_read_a - 1, 1>{}([&](auto ibuf_inst) {
                    ignore = ibuf_inst;
                    __builtin_amdgcn_sched_group_barrier(
                        0x008, staged_num_mfma_per_buffer_load_b, 0); // MFMA
                    __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                });
                __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                __builtin_amdgcn_sched_group_barrier(
                    0x008, staged_num_mfma_per_buffer_load_b - 1, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
            });
            __builtin_amdgcn_sched_barrier(0);
        }
        else if constexpr(stage.value == 1)
        {
            constexpr auto staged_num_mfma_per_ds_write_a =
                math::integer_divide_ceil(staged_num_mfma, num_ds_write_inst_a);
            constexpr auto stage_more_mfma =
                staged_num_mfma - (staged_num_mfma_per_ds_write_a - 1) * num_ds_write_inst_a;
            // A local write
            static_for<0, num_ds_write_inst_a, 1>{}([&](auto i_inst) {
                if constexpr(i_inst.value < stage_more_mfma)
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                    }
                }
                else
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 2, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                    }
                }
            });
            __builtin_amdgcn_sched_barrier(0);
        }
        else if constexpr(stage.value == 2)
        {
            constexpr auto staged_num_mfma_per_buffer_load_a =
                math::integer_divide_ceil(staged_num_mfma, num_buffer_load_inst_a);
            constexpr auto stage_more_mfma =
                staged_num_mfma - (staged_num_mfma_per_buffer_load_a - 1) * num_buffer_load_inst_a;
            // A global
            static_for<0, num_buffer_load_inst_a, 1>{}([&](auto i_inst) {
                if constexpr(i_inst.value < stage_more_mfma)
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_buffer_load_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_buffer_load_a, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                    }
                }
                else
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_buffer_load_a - 2, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_buffer_load_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                    }
                }
            });
            __builtin_amdgcn_sched_barrier(0);
        }
        else
        {
            // A local Read
            static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
                ignore = i_inst;
                __builtin_amdgcn_sched_group_barrier(0x008, staged_num_mfma_per_ds_read_a, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
            });
            __builtin_amdgcn_sched_barrier(0);
        }
    }
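Aside for readers unfamiliar with the scheduling intrinsic (not part of the file): roughly speaking, each __builtin_amdgcn_sched_group_barrier(mask, size, sync_id) call asks the compiler's instruction scheduler to place `size` instructions of the class selected by `mask` at that point, and the masks used throughout this file are annotated in its own comments (0x008 MFMA, 0x020 VMEM read, 0x100 DS read, 0x200 DS write). The fragment below only illustrates the shape of such an interleave with made-up counts; it needs a ROCm/HIP toolchain to compile.

// Hedged sketch (HIP/ROCm only): hint the scheduler to interleave one VMEM read
// between groups of four MFMA-class instructions, then fence the pattern.
__device__ void interleave_hint_example()
{
    __builtin_amdgcn_sched_group_barrier(0x008, 4, 0); // 4 x MFMA
    __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // 1 x VMEM read
    __builtin_amdgcn_sched_group_barrier(0x008, 4, 0); // 4 x MFMA
    __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // 1 x VMEM read
    __builtin_amdgcn_sched_barrier(0); // keep the pattern from mixing with surrounding code
}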
    template <typename Stage>
    __device__ static constexpr auto EpilogueScheduler_1(Stage stage)
    {
        constexpr auto num_ds_read_inst_a     = HotLoopInstList::A_LDS_Read_Inst_Num;
        constexpr auto num_ds_write_inst_a    = HotLoopInstList::A_LDS_Write_Inst_Num;
        constexpr auto num_buffer_load_inst_b = MWaves * HotLoopInstList::B_Buffer_Load_Inst_Num;
        constexpr auto num_mfma               = HotLoopInstList::C_MFMA_Inst_Num;

        constexpr auto staged_num_ds_read_inst_a     = num_ds_read_inst_a / MRepeat;
        constexpr auto staged_num_mfma               = num_mfma / MRepeat;
        constexpr auto staged_num_mfma_per_ds_read_a = staged_num_mfma / staged_num_ds_read_inst_a;

        if constexpr(stage.value == 0)
        {
            constexpr auto staged_num_buffer_load_b_per_ds_read_a =
                num_buffer_load_inst_b / staged_num_ds_read_inst_a;
            constexpr auto staged_num_mfma_per_buffer_load_b =
                staged_num_mfma / num_buffer_load_inst_b;
            // B global
            static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
                ignore = i_inst;
                static_for<0, staged_num_buffer_load_b_per_ds_read_a, 1>{}([&](auto ibuf_inst) {
                    ignore = ibuf_inst;
                    __builtin_amdgcn_sched_group_barrier(
                        0x008, staged_num_mfma_per_buffer_load_b, 0); // MFMA
                    __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
                });
                __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                __builtin_amdgcn_sched_group_barrier(
                    0x008, staged_num_mfma_per_buffer_load_b - 1, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x020, 1, 0); // VMEM read
            });
            __builtin_amdgcn_sched_barrier(0);
        }
        else if constexpr(stage.value == 1)
        {
#if 0
            constexpr auto staged_num_ds_write_a_per_ds_read_a =
                num_ds_write_inst_a / staged_num_ds_read_inst_a;
            constexpr auto staged_num_mfma_per_ds_write_a = staged_num_mfma / num_ds_write_inst_a;
            // A local write
            static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
                ignore = i_inst;
                static_for<0, staged_num_ds_write_a_per_ds_read_a, 1>{}([&](auto idswrite_inst) {
                    ignore = idswrite_inst;
                    __builtin_amdgcn_sched_group_barrier(
                        0x008, staged_num_mfma_per_ds_write_a - 1, 0); // MFMA
                    __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                });
                __builtin_amdgcn_sched_group_barrier(
                    0x008, staged_num_ds_write_a_per_ds_read_a, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
            });
#elif 1
            constexpr auto staged_num_mfma_per_ds_write_a =
                math::integer_divide_ceil(staged_num_mfma, num_ds_write_inst_a);
            constexpr auto stage_more_mfma =
                staged_num_mfma - (staged_num_mfma_per_ds_write_a - 1) * num_ds_write_inst_a;
            // A local write
            static_for<0, num_ds_write_inst_a, 1>{}([&](auto i_inst) {
                if constexpr(i_inst.value < stage_more_mfma)
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                    }
                }
                else
                {
                    if(i_inst.value < staged_num_ds_read_inst_a)
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 2, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                        __builtin_amdgcn_sched_group_barrier(0x008, 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
                    }
                    else
                    {
                        __builtin_amdgcn_sched_group_barrier(
                            0x008, staged_num_mfma_per_ds_write_a - 1, 0); // MFMA
                        __builtin_amdgcn_sched_group_barrier(0x200, 1, 0); // DS Write
                    }
                }
            });
#endif
            __builtin_amdgcn_sched_barrier(0);
        }
        else
        {
            // A local Read
            static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
                ignore = i_inst;
                __builtin_amdgcn_sched_group_barrier(0x008, staged_num_mfma_per_ds_read_a, 0); // MFMA
                __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
            });
            __builtin_amdgcn_sched_barrier(0);
        }
    }

    __device__ static constexpr auto EpilogueScheduler_2()
    {
        constexpr auto num_ds_read_inst_a = HotLoopInstList::A_LDS_Read_Inst_Num;
        constexpr auto num_mfma           = HotLoopInstList::C_MFMA_Inst_Num;

        constexpr auto staged_num_ds_read_inst_a     = num_ds_read_inst_a / MRepeat;
        constexpr auto staged_num_mfma               = num_mfma / MRepeat;
        constexpr auto staged_num_mfma_per_ds_read_a = staged_num_mfma / staged_num_ds_read_inst_a;

        // A local Read
        static_for<0, staged_num_ds_read_inst_a, 1>{}([&](auto i_inst) {
            ignore = i_inst;
            __builtin_amdgcn_sched_group_barrier(0x008, staged_num_mfma_per_ds_read_a, 0); // MFMA
            __builtin_amdgcn_sched_group_barrier(0x100, 1, 0); // DS read
        });
        __builtin_amdgcn_sched_barrier(0);
    }
    template <bool HasMainLoop,
              TailNumber TailNum,
              typename AGridDesc,
              typename ABlockDesc,
              typename ABlockTransfer,
              typename AGridBuffer,
              typename ABlockBuffer,
              typename ABlockTransferStep,
              typename BGridDesc,
              typename BBlockTransfer,
              typename BGridBuffer,
              typename BBlockBuffer,
              typename BBlockTransferStep,
              typename CThreadBuffer>
    __device__ void Run(const AGridDesc& a_grid_desc,
                        const ABlockDesc& a_block_desc,
                        ABlockTransfer& a_blockwise_copy,
                        const AGridBuffer& a_grid_buf,
                        ABlockBuffer& a_block_buf,
                        const ABlockTransferStep& a_block_copy_step,
                        const BGridDesc& b_grid_desc,
                        BBlockTransfer& b_blockwise_copy,
                        const BGridBuffer& b_grid_buf,
                        BBlockBuffer& b_block_buf,
                        const BBlockTransferStep& b_block_copy_step,
                        CThreadBuffer& c_thread_buf,
                        index_t num_loop) const
    {
        ignore = b_block_buf;
        __builtin_amdgcn_sched_barrier(0);
        auto a_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ComputeDataType>(
            a_thread_desc_.GetElementSpaceSize());
        auto b_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ComputeDataType>(
            b_thread_desc_.GetElementSpaceSize());

        StaticallyIndexedArray<decltype(b_thread_buf), Number<2>{}> b_thread_bufs;
        constexpr auto b_block_origin_idx = make_tuple(I0, I0, I0, I0);

        // Global prefetch A1 B1
        b_blockwise_copy.Run(b_grid_desc,
                             b_grid_buf,
                             b_block_desc_n0_n1_k0_k1,
                             b_block_origin_idx,
                             b_thread_bufs(I0));
        b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
        a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
        a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
        __builtin_amdgcn_sched_barrier(0);

        // // Local prefill A1
        a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(I0));

        // // Global prefetch A2
        a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
        a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);

        // Local prefetch A1
        block_sync_lds();
        static_for<0, KRepeat, 1>{}([&](auto k0) {
            a_thread_copy_.Run(a_block_desc_m0_m1_m2_k0_k1_k2,
                               make_tuple(I0, I0, I0, k0, I0, I0),
                               a_block_buf.At(I0),
                               a_thread_desc_,
                               make_tuple(I0, I0, I0, k0, I0, I0),
                               a_thread_buf);
        });

        // Initialize C
        c_thread_buf.Clear();

        __builtin_amdgcn_sched_barrier(0);
        // main body
        if constexpr(HasMainLoop)
        {
            index_t i = 0;
            do
            {
                auto LoopFunc = [&](auto mfma_reg_buf, auto local_read_buf) {
                    static_for<0, MRepeat, 1>{}([&](auto m0) {
                        if constexpr(m0.value == 0)
                        {
                            b_blockwise_copy.Run(b_grid_desc,
                                                 b_grid_buf,
                                                 b_block_desc_n0_n1_k0_k1,
                                                 b_block_origin_idx,
                                                 b_thread_bufs(local_read_buf));
                            b_blockwise_copy.MoveSrcSliceWindow(b_grid_desc, b_block_copy_step);
                        }
                        else if constexpr(m0.value == 1)
                        {
                            a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(local_read_buf));
                        }
                        else if constexpr(m0.value == 2)
                        {
                            a_blockwise_copy.RunRead(a_grid_desc, a_grid_buf);
                            a_blockwise_copy.MoveSrcSliceWindow(a_grid_desc, a_block_copy_step);
                        }

                        static_for<0, KRepeat, 1>{}([&](auto k0) {
                            static_for<0, NRepeat, 1>{}([&](auto n0) {
                                vector_type<ComputeDataType, KPack> a_thread_vec;
                                vector_type<ComputeDataType, KPack> b_thread_vec;

                                static_for<0, KPack, 1>{}([&](auto ik) {
                                    a_thread_vec.template AsType<ComputeDataType>()(ik) =
                                        a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                                            make_tuple((m0 + HotloopLocalBufSwitch * mfma_reg_buf) % 2,
                                                       I0, I0, k0, I0, ik))>{}];
                                    b_thread_vec.template AsType<ComputeDataType>()(ik) =
                                        b_thread_bufs[mfma_reg_buf]
                                            [Number<b_thread_desc_.CalculateOffset(
                                                make_tuple(n0, I0, k0, ik))>{}];
                                });

                                using mfma_input_type =
                                    typename vector_type<ComputeDataType,
                                                         xdlops_gemm.K1PerXdlops>::type;

                                constexpr index_t c_offset =
                                    c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                                xdlops_gemm.Run(
                                    a_thread_vec.template AsType<mfma_input_type>(),
                                    b_thread_vec.template AsType<mfma_input_type>(),
                                    c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                            });
                        });

                        if constexpr(m0.value == MRepeat - 1)
                        {
                            block_sync_lds();
                            static_for<0, KRepeat, 1>{}([&](auto k0) {
                                a_thread_copy_.Run(
                                    a_block_desc_m0_m1_m2_k0_k1_k2,
                                    make_tuple(Number<(m0 + 1) % MRepeat>{}, I0, I0, k0, I0, I0),
                                    a_block_buf.At(local_read_buf),
                                    a_thread_desc_,
                                    make_tuple(
                                        Number<(m0 + 1 + HotloopLocalBufSwitch * mfma_reg_buf) % 2>{},
                                        I0, I0, k0, I0, I0),
                                    a_thread_buf);
                            });
                        }
                        else
                        {
                            static_for<0, KRepeat, 1>{}([&](auto k0) {
                                a_thread_copy_.Run(
                                    a_block_desc_m0_m1_m2_k0_k1_k2,
                                    make_tuple(Number<(m0 + 1) % MRepeat>{}, I0, I0, k0, I0, I0),
                                    a_block_buf.At(mfma_reg_buf),
                                    a_thread_desc_,
                                    make_tuple(
                                        Number<(m0 + 1 + HotloopLocalBufSwitch * mfma_reg_buf) % 2>{},
                                        I0, I0, k0, I0, I0),
                                    a_thread_buf);
                            });
                        }
                        HotLoopScheduler(m0);
                    });
                };

                LoopFunc(I0, I1);
                LoopFunc(I1, I0);

                i += 2;
            } while(i < (num_loop - 2));
        }
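Aside (not part of the file): the hot loop advances two K tiles per trip (LoopFunc(I0, I1) then LoopFunc(I1, I0), i += 2) and exits while the last tiles are still in flight, which is why the Even/Odd tails below finish two stages or one. A trivial host-side check of that accounting, with the pipeline replaced by prints and assuming num_loop counts KPerBlock tiles, is sketched here.

// Hedged sketch of the iteration accounting: the hot loop runs in steps of 2 and
// leaves the final tiles to the Even/Odd tail paths.
#include <cstdio>

void trace(int num_loop)
{
    int i = 0, hot_tiles = 0;
    if(num_loop > 2) // BlockHasHotloop
    {
        do { hot_tiles += 2; i += 2; } while(i < num_loop - 2);
    }
    const int tail_tiles = num_loop - hot_tiles;
    std::printf("num_loop=%d  hot=%d  tail=%d (%s)\n", num_loop, hot_tiles, tail_tiles,
                num_loop % 2 == 0 ? "Even" : "Odd");
}

int main()
{
    for(int n : {3, 4, 7, 8}) trace(n);
    return 0;
}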
        // tail
        if constexpr(TailNum == TailNumber::Even)
        {
            static_for<0, MRepeat, 1>{}([&](auto m0) {
                if constexpr(m0.value == 0)
                {
                    b_blockwise_copy.Run(b_grid_desc,
                                         b_grid_buf,
                                         b_block_desc_n0_n1_k0_k1,
                                         b_block_origin_idx,
                                         b_thread_bufs(I1));
                }
                else if constexpr(m0.value == MRepeat - 1)
                {
                    a_blockwise_copy.RunWrite(a_block_desc, a_block_buf.At(I1));
                }

                static_for<0, KRepeat, 1>{}([&](auto k0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<ComputeDataType, KPack> a_thread_vec;
                        vector_type<ComputeDataType, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<ComputeDataType>()(ik) =
                                a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0 % 2, I0, I0, k0, I0, ik))>{}];
                            b_thread_vec.template AsType<ComputeDataType>()(ik) =
                                b_thread_bufs[I0][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<ComputeDataType, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.Run(a_thread_vec.template AsType<mfma_input_type>(),
                                        b_thread_vec.template AsType<mfma_input_type>(),
                                        c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });

                if constexpr(m0.value == MRepeat - 1)
                {
                    block_sync_lds();
                    static_for<0, KRepeat, 1>{}([&](auto k0) {
                        a_thread_copy_.Run(
                            a_block_desc_m0_m1_m2_k0_k1_k2,
                            make_tuple(Number<(m0 + 1) % MRepeat>{}, I0, I0, k0, I0, I0),
                            a_block_buf.At(I1),
                            a_thread_desc_,
                            make_tuple(Number<(m0 + 1) % 2>{}, I0, I0, k0, I0, I0),
                            a_thread_buf);
                    });
                }
                else
                {
                    static_for<0, KRepeat, 1>{}([&](auto k0) {
                        a_thread_copy_.Run(
                            a_block_desc_m0_m1_m2_k0_k1_k2,
                            make_tuple(Number<(m0 + 1) % MRepeat>{}, I0, I0, k0, I0, I0),
                            a_block_buf.At(I0),
                            a_thread_desc_,
                            make_tuple(Number<(m0 + 1) % 2>{}, I0, I0, k0, I0, I0),
                            a_thread_buf);
                    });
                }
                EpilogueScheduler_1(m0);
            });

            static_for<0, MRepeat, 1>{}([&](auto m0) {
                static_for<0, KRepeat, 1>{}([&](auto k0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<ComputeDataType, KPack> a_thread_vec;
                        vector_type<ComputeDataType, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<ComputeDataType>()(ik) =
                                a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                                    make_tuple((m0 + HotloopLocalBufSwitch) % 2,
                                               I0, I0, k0, I0, ik))>{}];
                            b_thread_vec.template AsType<ComputeDataType>()(ik) =
                                b_thread_bufs[I1][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<ComputeDataType, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.Run(a_thread_vec.template AsType<mfma_input_type>(),
                                        b_thread_vec.template AsType<mfma_input_type>(),
                                        c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });

                if constexpr(m0.value != (MRepeat - 1))
                {
                    static_for<0, KRepeat, 1>{}([&](auto k0) {
                        a_thread_copy_.Run(
                            a_block_desc_m0_m1_m2_k0_k1_k2,
                            make_tuple(Number<m0 + 1>{}, I0, I0, k0, I0, I0),
                            a_block_buf.At(I1),
                            a_thread_desc_,
                            make_tuple(Number<(m0 + 1 + HotloopLocalBufSwitch) % 2>{},
                                       I0, I0, k0, I0, I0),
                            a_thread_buf);
                    });
                    EpilogueScheduler_2();
                }
            });
            // Let's leak last MFMA block to epilogue region, cover the potential lds-shuffle
            // latency
            // __builtin_amdgcn_sched_barrier(0);
        }
        else
        {
            static_for<0, MRepeat, 1>{}([&](auto m0) {
                static_for<0, KRepeat, 1>{}([&](auto k0) {
                    static_for<0, NRepeat, 1>{}([&](auto n0) {
                        vector_type<ComputeDataType, KPack> a_thread_vec;
                        vector_type<ComputeDataType, KPack> b_thread_vec;

                        static_for<0, KPack, 1>{}([&](auto ik) {
                            a_thread_vec.template AsType<ComputeDataType>()(ik) =
                                a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                                    make_tuple(m0 % 2, I0, I0, k0, I0, ik))>{}];
                            b_thread_vec.template AsType<ComputeDataType>()(ik) =
                                b_thread_bufs[I0][Number<b_thread_desc_.CalculateOffset(
                                    make_tuple(n0, I0, k0, ik))>{}];
                        });

                        using mfma_input_type =
                            typename vector_type<ComputeDataType, xdlops_gemm.K1PerXdlops>::type;

                        constexpr index_t c_offset =
                            c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                        xdlops_gemm.Run(a_thread_vec.template AsType<mfma_input_type>(),
                                        b_thread_vec.template AsType<mfma_input_type>(),
                                        c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                    });
                });

                if constexpr(m0.value != (MRepeat - 1))
                {
                    static_for<0, KRepeat, 1>{}([&](auto k0) {
                        a_thread_copy_.Run(
                            a_block_desc_m0_m1_m2_k0_k1_k2,
                            make_tuple(Number<m0 + 1>{}, I0, I0, k0, I0, I0),
                            a_block_buf.At(I0),
                            a_thread_desc_,
                            make_tuple(Number<(m0 + 1) % 2>{}, I0, I0, k0, I0, I0),
                            a_thread_buf);
                    });
                    EpilogueScheduler_2();
                }
            });
        }
    }
    protected:
    // MRepeat MWave MLane KRepeat KLane KPack
    // KRepeat -> MRepeat-> Mwave->KLane->MLane->KPack
    // Reduce the vgpr usage here.
    static constexpr auto a_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(I2, I1, I1, Number<KRepeat>{}, I1, Number<KPack>{}));

    using AThreadCopy = ThreadwiseTensorSliceTransfer_v4<ADataType,
                                                         ComputeDataType,
                                                         decltype(a_block_desc_m0_m1_m2_k0_k1_k2),
                                                         decltype(a_thread_desc_),
                                                         Sequence<1, 1, 1, 1, 1, KPack>,
                                                         Sequence<0, 1, 2, 3, 4, 5>,
                                                         5,
                                                         A_K1,
                                                         A_K1>;

    AThreadCopy a_thread_copy_{Base::CalculateAThreadOriginDataIndex6D()};

    static constexpr auto b_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(Number<NRepeat>{}, I1, Number<KRepeat>{}, Number<KPack>{}));

    static constexpr BTileDesc b_block_desc_n0_n1_k0_k1;

    using Base::c_thread_desc_;
};

} // namespace ck
include/ck/tensor_operation/gpu/device/impl/device_moe_gemm.hpp

@@ -380,11 +380,12 @@ struct DeviceMoeGemm
             // }
             // else
             {
-                const auto kernel = kernel_moe_gemm_gather<GridwiseGemm,
+                const auto kernel = kernel_moe_gemm<GridwiseGemm,
                                                            true,
                                                            InMemoryDataOperationEnum::Set,
                                                            minimum_occupancy,
-                                                           TailNumber::Odd>;
+                                                           IsInputGemm,
+                                                           TailNumber::Odd>;
                 RunKernel(kernel);
             }
         }