gaoqiong / composable_kernel · Commits

Commit c54a1014, authored Aug 29, 2023 by letaoqin
Parent: 703ef6d7

    fix d0load descriptor name

Showing 3 changed files with 21 additions and 33 deletions:

  example/52_flash_atten_bias/batched_multihead_attention_bias_backward_v2.cpp  (+1, -1)
  include/ck/tensor_operation/gpu/grid/gridwise_batched_mha_bwd_xdl_cshuffle_qloop_b2t_v1.hpp  (+14, -20)
  include/ck/tensor_operation/gpu/grid/gridwise_batched_mha_bwd_xdl_cshuffle_qloop_b2t_v2.hpp  (+6, -12)
example/52_flash_atten_bias/batched_multihead_attention_bias_backward_v2.cpp
@@ -25,7 +25,7 @@ Kernel outputs:
 #define PRINT_HOST 0
 #define USING_MASK 0
-#define DIM 128 // DIM should be a multiple of 8.
+#define DIM 64 // DIM should be a multiple of 8.
 #include <iostream>
 #include <numeric>
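The "multiple of 8" requirement lives only in a comment. A minimal sketch (not part of this commit) of how the example could enforce it at compile time, right after the macro definition:

    #define DIM 64 // DIM should be a multiple of 8.
    static_assert(DIM % 8 == 0, "DIM must be a multiple of 8");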
include/ck/tensor_operation/gpu/grid/gridwise_batched_mha_bwd_xdl_cshuffle_qloop_b2t_v1.hpp
@@ -1265,24 +1265,18 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V1
     static_assert(MPerXdl <= KPerBlock);
     static_assert(D0BlockTransferSrcScalarPerVector * NThreadClusterLengths <= NPerBlock,
                   "D0BlockTransferSrcScalarPerVector * NThreadClusterLengths <= NPerBlock");

-    __host__ __device__ static constexpr auto GetD0BlockDescriptor_M0_N0_M1_M2_N1_M3()
+    __host__ __device__ static constexpr auto GetD0BlockWriteDescriptor_M0_N0_M1_M2_N1_M3()
     {
         // B1 matrix in LDS memory, dst of blockwise copy
-        return make_naive_tensor_descriptor(
-            make_tuple(I1, I1, I1, D0M1, Number<NPerBlock>{}, D0M2),
-            make_tuple(Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       D0M2,
-                       I1));
+        return make_naive_tensor_descriptor_packed(
+            make_tuple(I1, I1, I1, D0M1, Number<NPerBlock>{}, D0M2));
     }

     __host__ __device__ static constexpr auto GetD0BlockReadDescriptor_N0_N1_M0_M1_M2_M3()
     {
-        constexpr auto d0_raw_m0_n_m1 = make_naive_tensor_descriptor(
-            make_tuple(D0M1, Number<NPerBlock>{}, D0M2),
-            make_tuple(Number<NPerBlock>{} * D0M2, D0M2, I1));
-        constexpr auto d0_n0_n1_m0_m1_m2_m3 = transform_tensor_descriptor(
+        constexpr auto d0_raw_m0_n_m1 = make_naive_tensor_descriptor_packed(
+            make_tuple(D0M1, Number<NPerBlock>{}, D0M2));
+        constexpr auto d0_n0_n1_m0_m1_m2 = transform_tensor_descriptor(
             d0_raw_m0_n_m1,
             make_tuple(make_unmerge_transform(make_tuple(D0M1 / I2, I2)),
                        make_unmerge_transform(
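Both descriptor helpers above swap make_naive_tensor_descriptor, which takes explicit strides, for make_naive_tensor_descriptor_packed, which derives row-major strides from the lengths alone. A standalone sketch (plain C++17, independent of CK; the D0M1, NPerBlock and D0M2 values are hypothetical stand-ins for the kernel's template parameters) of why the two forms address LDS identically here:

    #include <array>
    #include <cstddef>

    // Hypothetical stand-ins for the kernel's compile-time tile sizes.
    constexpr std::size_t D0M1      = 2;
    constexpr std::size_t NPerBlock = 128;
    constexpr std::size_t D0M2      = 4;

    // Packed (row-major) strides: stride[i] is the product of all lengths to
    // the right of dimension i. This is what the _packed variant computes.
    template <std::size_t N>
    constexpr std::array<std::size_t, N> packed_strides(std::array<std::size_t, N> len)
    {
        std::array<std::size_t, N> s{};
        std::size_t run = 1;
        for(std::size_t i = N; i-- > 0;)
        {
            s[i] = run;
            run *= len[i];
        }
        return s;
    }

    constexpr auto strides = packed_strides<6>({1, 1, 1, D0M1, NPerBlock, D0M2});

    // These match the strides the old call spelled out by hand:
    static_assert(strides[3] == NPerBlock * D0M2);
    static_assert(strides[4] == D0M2);
    static_assert(strides[5] == 1);
    // Dims 0..2 have length 1, so they contribute index 0 to every offset and
    // their strides never matter; the packed and hand-written descriptors
    // therefore address the same elements.

    int main() { return 0; }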
@@ -1290,11 +1284,11 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V1
                        make_pass_through_transform(D0M2)),
             make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}),
             make_tuple(Sequence<2, 3>{}, Sequence<0, 1>{}, Sequence<4>{}));

-        return d0_n0_n1_m0_m1_m2_m3;
+        return d0_n0_n1_m0_m1_m2;
     }

-    static constexpr auto d0_block_desc_m0_n0_m1_m2_n1_m3 = GetD0BlockDescriptor_M0_N0_M1_M2_N1_M3();
-    static constexpr auto d0_block_desc_n0_n1_m0_m1_m2_m3 = GetD0BlockReadDescriptor_N0_N1_M0_M1_M2_M3();
+    static constexpr auto d0_block_desc_m0_n0_m1_m2_n1_m3 = GetD0BlockWriteDescriptor_M0_N0_M1_M2_N1_M3();
+    static constexpr auto d0_block_desc_n0_n1_m0_m1_m2 = GetD0BlockReadDescriptor_N0_N1_M0_M1_M2_M3();

     static constexpr auto d0_thread_desc_ =
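GetD0BlockReadDescriptor builds its view by re-indexing the raw (m0, n, m1) descriptor with unmerge transforms such as make_unmerge_transform(make_tuple(D0M1 / I2, I2)). A toy model of the index arithmetic an unmerge performs (not CK's implementation; D0M1 = 8 is a made-up value):

    #include <cassert>
    #include <cstddef>

    // Toy model of make_unmerge_transform: one dimension of length L is
    // re-indexed as two dimensions (L / K, K), with the original index
    // recovered as hi * K + lo. No data moves; only the coordinate map changes.
    struct Unmerge2
    {
        std::size_t K; // length of the new fast-moving (low) dimension

        std::size_t merge(std::size_t hi, std::size_t lo) const { return hi * K + lo; }
        std::size_t hi(std::size_t idx) const { return idx / K; }
        std::size_t lo(std::size_t idx) const { return idx % K; }
    };

    int main()
    {
        // Mirrors make_unmerge_transform(make_tuple(D0M1 / I2, I2)) from the
        // hunk above, with a hypothetical D0M1 = 8: the m0 axis becomes (4, 2).
        constexpr std::size_t D0M1 = 8;
        Unmerge2 u{2};
        for(std::size_t m = 0; m < D0M1; ++m)
            assert(u.merge(u.hi(m), u.lo(m)) == m); // every index round-trips
        return 0;
    }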
@@ -1330,10 +1324,10 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V1
                 1>;

     using D0ThreadCopy =
-        ThreadwiseTensorSliceTransfer_v4<typename TypeTransform<D0DataType>::Type,  // SrcData
-                                         typename TypeTransform<D0DataType>::Type,  // DstData
-                                         decltype(d0_block_desc_n0_n1_m0_m1_m2_m3), // SrcDesc
-                                         decltype(d0_thread_desc_),                 // DstDesc
+        ThreadwiseTensorSliceTransfer_v4<typename TypeTransform<D0DataType>::Type, // SrcData
+                                         typename TypeTransform<D0DataType>::Type, // DstData
+                                         decltype(d0_block_desc_n0_n1_m0_m1_m2),   // SrcDesc
+                                         decltype(d0_thread_desc_),                // DstDesc
                                          Sequence<1, 1, 4, 1, 4>, // SliceLengths
                                          Sequence<0, 1, 2, 3, 4>, // DimAccessOrder
                                          4,                       // SrcVectorDim
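Only the SrcDesc argument changes in this hunk; SliceLengths, DimAccessOrder and SrcVectorDim stay at Sequence<1, 1, 4, 1, 4>, Sequence<0, 1, 2, 3, 4> and 4. A scalar model (plain C++, not ThreadwiseTensorSliceTransfer_v4) of what those three parameters describe for a single thread's copy:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const std::size_t len[5]   = {1, 1, 4, 1, 4}; // SliceLengths
        const std::size_t order[5] = {0, 1, 2, 3, 4}; // DimAccessOrder
        std::size_t idx[5]         = {};

        std::size_t scalars = 0;
        bool done = false;
        while(!done)
        {
            ++scalars; // visit idx[]; dim order[4] (the SrcVectorDim) moves fastest
            int d;
            for(d = 4; d >= 0; --d) // odometer carry, innermost dim first
            {
                if(++idx[order[d]] < len[order[d]])
                    break;
                idx[order[d]] = 0;
            }
            done = (d < 0);
        }
        // 1*1*4*1*4 = 16 scalars; contiguous runs of len[4] = 4 along the
        // vector dim can be fused into 4-wide loads, giving 4 vector loads.
        std::printf("%zu scalars -> %zu vector loads\n", scalars, scalars / 4);
        return 0;
    }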
@@ -2103,7 +2097,7 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V1
             d0_block_buf);
         block_sync_lds();
         // read data form lds
-        d0_thread_copy_lds_to_vgpr.Run(D0Loader::d0_block_desc_n0_n1_m0_m1_m2_m3,
+        d0_thread_copy_lds_to_vgpr.Run(D0Loader::d0_block_desc_n0_n1_m0_m1_m2,
                                        make_tuple(I0, I0, I0, I0, I0),
                                        d0_block_buf,
                                        D0Loader::d0_thread_desc_,
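The unchanged context shows the ordering the fix relies on: a blockwise copy fills d0_block_buf in LDS, block_sync_lds() makes the tile visible to the whole workgroup, and only then does the threadwise LDS-to-VGPR read run. A generic shared-memory analogue of that pattern (CUDA/HIP-style sketch, assuming a launch with 256 threads per block; not CK code):

    // Write -> barrier -> read, the same discipline as the hunk above.
    __global__ void tile_roundtrip(const float* src, float* dst)
    {
        __shared__ float lds_tile[256]; // stands in for d0_block_buf in LDS

        // blockwise write: each thread stages one element into shared memory
        lds_tile[threadIdx.x] = src[blockIdx.x * 256 + threadIdx.x];

        __syncthreads(); // block_sync_lds() plays this role in the CK kernel

        // threadwise read of an element written by a *different* thread;
        // without the barrier this could observe a stale value
        dst[blockIdx.x * 256 + threadIdx.x] = lds_tile[(threadIdx.x + 1) % 256];
    }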
include/ck/tensor_operation/gpu/grid/gridwise_batched_mha_bwd_xdl_cshuffle_qloop_b2t_v2.hpp
@@ -1196,23 +1196,17 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V2
     static_assert(NPerXdl == 32);
     static_assert(D0BlockTransferSrcScalarPerVector * NThreadClusterLengths <= NPerBlock,
                   "D0BlockTransferSrcScalarPerVector * NThreadClusterLengths <= NPerBlock");

-    __host__ __device__ static constexpr auto GetD0BlockDescriptor_M0_N0_M1_M2_N1_M3()
+    __host__ __device__ static constexpr auto GetD0BlockWriteDescriptor_M0_N0_M1_M2_N1_M3()
     {
         // B1 matrix in LDS memory, dst of blockwise copy
-        return make_naive_tensor_descriptor(
-            make_tuple(I1, I1, I1, D0M1, Number<NPerBlock>{}, D0M2),
-            make_tuple(Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       Number<NPerBlock>{} * D0M2,
-                       D0M2,
-                       I1));
+        return make_naive_tensor_descriptor_packed(
+            make_tuple(I1, I1, I1, D0M1, Number<NPerBlock>{}, D0M2));
     }

     __host__ __device__ static constexpr auto GetD0BlockReadDescriptor_N0_N1_M0_M1_M2()
     {
-        constexpr auto d0_raw_m0_n_m1 = make_naive_tensor_descriptor(
-            make_tuple(D0M1, Number<NPerBlock>{}, D0M2),
-            make_tuple(Number<NPerBlock>{} * D0M2, D0M2, I1));
+        constexpr auto d0_raw_m0_n_m1 = make_naive_tensor_descriptor_packed(
+            make_tuple(D0M1, Number<NPerBlock>{}, D0M2));

         constexpr auto d0_n0_n1_m0_m1_m2 = transform_tensor_descriptor(
             d0_raw_m0_n_m1,
             make_tuple(make_unmerge_transform(make_tuple(D0M1 / I2, I2)),
@@ -1224,7 +1218,7 @@ struct GridwiseBatchedMultiheadAttentionBackward_Qloop_Xdl_CShuffle_V2
         return d0_n0_n1_m0_m1_m2;
     }

-    static constexpr auto d0_block_desc_m0_n0_m1_m2_n1_m3 = GetD0BlockDescriptor_M0_N0_M1_M2_N1_M3();
+    static constexpr auto d0_block_desc_m0_n0_m1_m2_n1_m3 = GetD0BlockWriteDescriptor_M0_N0_M1_M2_N1_M3();

     static constexpr auto d0_block_desc_n0_n1_m0_m1_m2 = GetD0BlockReadDescriptor_N0_N1_M0_M1_M2();
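Both files bind their descriptors with auto, so nothing ever checked that a variable's _n0_n1_..._m3 suffix matched the rank of the descriptor it held, which is how the misnamed d0load read descriptor survived until this commit. An illustrative sketch (plain C++, with std::tuple standing in for CK descriptors) of a rank check that would have caught the mismatch:

    #include <cstddef>
    #include <tuple>

    // Descriptors modeled as tuples of lengths; rank is the tuple size. A
    // helper that static_asserts the expected rank turns "wrong descriptor
    // passed", which auto happily accepts, into a compile error.
    template <std::size_t ExpectedRank, class Desc>
    constexpr void expect_rank(const Desc&)
    {
        static_assert(std::tuple_size<Desc>::value == ExpectedRank,
                      "descriptor rank does not match its _n0_n1_... name");
    }

    int main()
    {
        auto desc_m0_n0_m1_m2_n1_m3 = std::make_tuple(1, 1, 1, 2, 128, 4); // 6-dim write desc
        auto desc_n0_n1_m0_m1_m2    = std::make_tuple(4, 32, 2, 2, 4);     // 5-dim read desc

        expect_rank<6>(desc_m0_n0_m1_m2_n1_m3);
        expect_rank<5>(desc_n0_n1_m0_m1_m2);
        // expect_rank<6>(desc_n0_n1_m0_m1_m2); // would fail to compile
        return 0;
    }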