gaoqiong / composable_kernel

Commit 5938d555, authored Jul 04, 2023 by ltqin

    add DDattype and DKPerBlock parameter to device

parent 7c686fc2
Showing 4 changed files with 65 additions and 47 deletions.
example/32_batched_gemm_scale_softmax_gemm/batched_multihead_attention_backward_v3.cpp   +39 -38
include/ck/tensor_operation/gpu/device/impl/device_batched_mha_bwd_xdl_cshuffle_qloop_light_v1.hpp   +8 -4
include/ck/tensor_operation/gpu/device/impl/device_batched_mha_bwd_xdl_cshuffle_qloop_light_v2.hpp   +8 -4
include/ck/tensor_operation/gpu/grid/gridwise_batched_multihead_attention_bacckward_ydotygrad.hpp   +10 -1
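Taken together, the headers below thread two new template parameters, DDataType and DKPerBlock, from the V1/V2 device operations down to the Y-dot-YGrad gridwise kernel, replacing an internal `using DDataType = GemmAccDataType;` alias and the previously hard-coded per-block K extents (32 in V1, 64 in V2). The example .cpp diff is collapsed on this page, so the updated call site is not visible. Below is a minimal, self-contained sketch of that pattern; GridwiseYDotYGradSketch, DeviceMhaBwdSketch, and all concrete values are hypothetical simplifications, not the actual CK templates.

#include <cstdint>

using index_t = std::int32_t;

// Gridwise-level sketch: the per-block K extent now arrives as a template
// parameter instead of a hard-coded literal.
template <typename DDataType, index_t BlockSize, index_t MPerBlock, index_t KPerBlock>
struct GridwiseYDotYGradSketch
{
    static_assert(BlockSize == MPerBlock, "BlockSize must be same with MPerBlock");
};

// Device-level sketch: DDataType and DKPerBlock are forwarded by the device op
// rather than being fixed inside the header.
template <typename DDataType, index_t BlockSize, index_t DKPerBlock>
struct DeviceMhaBwdSketch
{
    using GridwiseYDotYGrad =
        GridwiseYDotYGradSketch<DDataType, BlockSize, BlockSize, DKPerBlock>;
};

int main()
{
    // Hypothetical instantiation; the concrete values are for illustration only.
    [[maybe_unused]] DeviceMhaBwdSketch<float, /*BlockSize=*/256, /*DKPerBlock=*/32> op{};
    return 0;
}

Exposing DKPerBlock at the device level lets each configuration pick its own K tile size instead of inheriting the fixed 32 or 64 baked into the V1 and V2 headers.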
example/32_batched_gemm_scale_softmax_gemm/batched_multihead_attention_backward_v3.cpp

This diff is collapsed.
include/ck/tensor_operation/gpu/device/impl/device_batched_mha_bwd_xdl_cshuffle_qloop_light_v1.hpp

@@ -284,6 +284,7 @@ template <index_t NumDimG,
           typename GemmDataType,
           typename ZDataType,
           typename LSEDataType,
+          typename DDataType,
           typename Acc0BiasDataType,
           typename Acc1BiasDataType,
           typename GemmAccDataType,

@@ -314,6 +315,7 @@ template <index_t NumDimG,
           index_t NXdlPerWave,
           index_t Gemm1NXdlPerWave,
           index_t Gemm2NXdlPerWave,
+          index_t DKPerBlock,
           typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
           typename ABlockTransferThreadClusterArrangeOrder,
           typename ABlockTransferSrcAccessOrder,

@@ -348,7 +350,6 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V1
     static_assert(NumAcc0Bias == 0 && NumAcc0Bias == 0, "Bias addition is unimplemented");

     using DeviceOp = DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V1;
-    using DDataType = GemmAccDataType;

     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};

@@ -764,7 +765,7 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V1
         DGridDesc_M,
         BlockSize,
         BlockSize,
-        32>;
+        DKPerBlock>;

     // Argument
     struct Argument : public BaseArgument
     {

@@ -1161,7 +1162,10 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V1
         }

         // TODO: Check if tensor specialization & strides mismatch
+        if(!GridwiseYDotYGrad::CheckValidity(arg.y_grid_desc_m_o_, arg.d_block_2_ctile_map_))
+        {
+            return false;
+        }

         // Check if C permute dimension matches GEMM + GEMM shape
         const index_t c_g = arg.c_grid_desc_g_m_n_.GetLength(I0); // unpadded
         const index_t c_m = arg.y_grid_desc_m_o_.GetLength(I0);
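Both device headers gain the same runtime guard: before a problem is accepted, the argument check now asks the Y-dot-YGrad gridwise kernel whether the y descriptor and block-to-tile map fit its tiling. The sketch below shows that delegation pattern only; YGridDescMO, DBlock2CTileMap, GridwiseYDotYGradStub, and IsSupportedArgumentSketch are hypothetical stand-ins, not the real CK types.

// Hypothetical, simplified stand-ins for the real descriptor and tile-map types.
struct YGridDescMO
{
    int M = 0;
    int O = 0;
};

struct DBlock2CTileMap
{
};

// Stub for the gridwise-level check; the concrete constraints it applies are
// sketched after the grid-file diff further below.
template <int MPerBlock>
struct GridwiseYDotYGradStub
{
    static bool CheckValidity(const YGridDescMO& desc, const DBlock2CTileMap&)
    {
        return desc.M > 0; // placeholder condition for this sketch
    }
};

// Device-side guard in the spirit of the added lines: fail fast during argument
// validation instead of launching a kernel that cannot tile the problem.
template <int BlockSize, int DKPerBlock>
bool IsSupportedArgumentSketch(const YGridDescMO& y_grid_desc_m_o,
                               const DBlock2CTileMap& d_block_2_ctile_map)
{
    using GridwiseYDotYGrad = GridwiseYDotYGradStub</*MPerBlock=*/BlockSize>;
    if(!GridwiseYDotYGrad::CheckValidity(y_grid_desc_m_o, d_block_2_ctile_map))
    {
        return false;
    }
    return true;
}

int main()
{
    const YGridDescMO y{/*M=*/1024, /*O=*/64};
    return IsSupportedArgumentSketch</*BlockSize=*/256, /*DKPerBlock=*/32>(y, DBlock2CTileMap{}) ? 0 : 1;
}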
include/ck/tensor_operation/gpu/device/impl/device_batched_mha_bwd_xdl_cshuffle_qloop_light_v2.hpp

@@ -283,6 +283,7 @@ template <index_t NumDimG,
           typename GemmDataType,
           typename ZDataType,
           typename LSEDataType,
+          typename DDataType,
           typename Acc0BiasDataType,
           typename Acc1BiasDataType,
           typename GemmAccDataType,

@@ -313,6 +314,7 @@ template <index_t NumDimG,
           index_t NXdlPerWave,
           index_t Gemm1NXdlPerWave,
           index_t Gemm2NXdlPerWave,
+          index_t DKPerBlock,
           typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
           typename ABlockTransferThreadClusterArrangeOrder,
           typename ABlockTransferSrcAccessOrder,

@@ -354,7 +356,6 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V2
     static_assert(NumAcc0Bias == 0 && NumAcc0Bias == 0, "Bias addition is unimplemented");

     using DeviceOp = DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V2;
-    using DDataType = GemmAccDataType;

     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};

@@ -778,7 +779,7 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V2
         DGridDesc_M,
         BlockSize,
         BlockSize,
-        64>;
+        DKPerBlock>;

     // Argument
     struct Argument : public BaseArgument
     {

@@ -1188,7 +1189,10 @@ struct DeviceBatchedMultiheadAttentionBackward_Xdl_CShuffle_V2
         }

         // TODO: Check if tensor specialization & strides mismatch
+        if(!GridwiseYDotYGrad::CheckValidity(arg.y_grid_desc_m_o_, arg.d_block_2_ctile_map_))
+        {
+            return false;
+        }

         // Check if C permute dimension matches GEMM + GEMM shape
         const index_t c_g = arg.c_grid_desc_g_m_n_.GetLength(I0); // unpadded
         const index_t c_m = arg.y_grid_desc_m_o_.GetLength(I0);
include/ck/tensor_operation/gpu/grid/gridwise_batched_multihead_attention_bacckward_ydotygrad.hpp

@@ -36,6 +36,7 @@ struct GridwiseBatchedMultiheadAttentionBackward_YDotYGrad
     static constexpr auto I4 = Number<4>{};

     static constexpr auto WaveSize = 64;
+    static_assert(BlockSize == MPerBlock, "BlockSize must be same with MPerBlock");

     // block_id to matrix tile idx (m0, n0) mapping are controlled by {M01, N01}
     template <typename Block2CTileMap>

@@ -46,7 +47,15 @@ struct GridwiseBatchedMultiheadAttentionBackward_YDotYGrad
         {
             return false;
         }

+        const auto M = c_grid_desc_m_n.GetLength(I0);
+        if(M < MPerBlock)
+        {
+            return false;
+        }
+        if(M % MPerBlock != 0)
+        {
+            return false;
+        }

         // TODO: also check validity of all components (blockwise-copy, threadwise-copy, etc)
         return true;
     }
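The constraints this new CheckValidity code enforces are small enough to state concretely: the kernel walks the M dimension in tiles of MPerBlock (which the new static_assert pins to BlockSize), so M must be at least one tile and an exact multiple of the tile size. The sketch below restates those checks with concrete numbers; YDotYGradValiditySketch and the tile sizes are hypothetical simplifications, not the actual CK kernel.

#include <cassert>

using index_t = int;

template <index_t BlockSize, index_t MPerBlock>
struct YDotYGradValiditySketch
{
    static_assert(BlockSize == MPerBlock, "BlockSize must be same with MPerBlock");

    static bool CheckValidity(index_t M)
    {
        if(M < MPerBlock)
        {
            return false; // problem too small for even one M tile
        }
        if(M % MPerBlock != 0)
        {
            return false; // leftover rows are not handled by this tiling
        }
        return true;
    }
};

int main()
{
    using Check = YDotYGradValiditySketch</*BlockSize=*/256, /*MPerBlock=*/256>;
    assert(Check::CheckValidity(1024));  // 1024 = 4 * 256, accepted
    assert(!Check::CheckValidity(1000)); // 1000 % 256 != 0, rejected
    assert(!Check::CheckValidity(128));  // smaller than one tile, rejected
    return 0;
}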