gaoqiong / composable_kernel · Commits · 1f04cd2b

Unverified commit 1f04cd2b, authored Sep 04, 2023 by Dan Yao, committed by GitHub on Sep 04, 2023.

Merge pull request #888 from ROCmSoftwarePlatform/mha-dropout-update2

Batched dropout device-op update

Parents: 3c1d5412, 6d2e3152

Showing 3 changed files with 70 additions and 103 deletions.
example/52_flash_atten_bias/batched_multihead_attention_bias_forward_v2_zcheck.cpp   +19 -25
example/52_flash_atten_bias/run_batched_multihead_attention_bias_forward_zcheck.inc  +0  -4
include/ck/tensor_operation/gpu/device/impl/device_batched_dropout.hpp               +51 -74
example/52_flash_atten_bias/batched_multihead_attention_bias_forward_v2_zcheck.cpp

@@ -301,31 +301,25 @@ using DeviceGemmInstance =
         Deterministic>;
 #endif
 using DeviceDropoutInstance =
     ck::tensor_operation::device::DeviceBatchedDropout<NumDimG,
-                                                       NumDimM,
-                                                       NumDimN,
-                                                       NumDimK,
-                                                       NumDimO,
                                                        GemmDataType,
                                                        ZDataType,
                                                        GemmDataType,
                                                        GemmSpec,
                                                        TensorSpecA,
                                                        TensorSpecB0,
                                                        TensorSpecB1,
                                                        TensorSpecC,
                                                        256, // BlockSize
                                                        64,  // MPerBlock
                                                        128, // NPerBlock
                                                        32,  // KPerBlock
-                                                       128, // Gemm1NPerBlock
                                                        8,   // AK1
                                                        8,   // BK1
                                                        32,  // MPerXDL
                                                        32,  // NPerXDL
                                                        2,   // MXdlPerWave
                                                        1>;  // NXdlPerWave
 #include "run_batched_multihead_attention_bias_forward_zcheck.inc"
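For context, a minimal sketch of how the example drives this trimmed-down instance. The MakeArgument call matches the .inc diff below; the invoker object, its Run call, and StreamConfig{} are assumptions based on the usual composable_kernel device-op pattern and are not part of this commit.

    // Sketch only -- not taken from this commit. Assumes the aliases and buffers
    // defined earlier in the example (DeviceDropoutInstance, ZDataType,
    // z_device_buf_2, z_gs_ms_ns_lengths/strides, seed, offset).
    DeviceDropoutInstance dropout_op;
    auto dropout_arg = dropout_op.MakeArgument(
        static_cast<ZDataType*>(z_device_buf_2.GetDeviceBuffer()),
        z_gs_ms_ns_lengths, // lengths of the dropout mask tensor Z: [G..., M, N]
        z_gs_ms_ns_strides,
        {seed, offset});    // RNG seed / offset pair
    auto dropout_invoker = dropout_op.MakeInvoker();
    dropout_invoker.Run(dropout_arg, StreamConfig{}); // assumed invoker interface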
example/52_flash_atten_bias/run_batched_multihead_attention_bias_forward_zcheck.inc

@@ -233,10 +233,6 @@ int run(int argc, char* argv[])
     auto dropout_arg =
         dropout_op.MakeArgument(static_cast<ZDataType*>(z_device_buf_2.GetDeviceBuffer()),
-                                a_gs_ms_ks_lengths,
-                                {0, 0, 0, 0},
-                                b0_gs_ns_ks_lengths,
-                                {0, 0, 0, 0},
                                 z_gs_ms_ns_lengths,
                                 z_gs_ms_ns_strides,
                                 {seed, offset});
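For orientation, an illustrative construction of the Z lengths and strides that now fully determine the dropout argument. The two-G-dimension layout (batch, head) and the packed row-major strides are assumptions for illustration, not values from this commit.

    // Illustration only: with NumDimG == 2, Z is indexed as [G0, G1, M, N];
    // a packed row-major layout would use these strides.
    ck::index_t G0 = 4, G1 = 8, M = 512, N = 512; // hypothetical sizes
    std::vector<ck::index_t> z_gs_ms_ns_lengths{G0, G1, M, N};
    std::vector<ck::index_t> z_gs_ms_ns_strides{G1 * M * N, M * N, N, 1};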
include/ck/tensor_operation/gpu/device/impl/device_batched_dropout.hpp

@@ -85,10 +85,6 @@ __global__ void
 //                                          ^^^^^^ (Acc0)
 //                                          ^^^^^^^^^^^ (Acc1)
 template <index_t NumDimG,
-          index_t NumDimM,
-          index_t NumDimN,
-          index_t NumDimK,
-          index_t NumDimO, // NumDimGemm1N
           typename GemmDataType,
           typename ZDataType,
           typename GemmAccDataType,
@@ -101,7 +97,6 @@ template <index_t NumDimG,
           index_t MPerBlock,
           index_t NPerBlock, // Gemm0NPerBlock
           index_t KPerBlock, // Gemm0KPerBlock
-          index_t Gemm1NPerBlock,
           index_t AK1,
           index_t BK1,
           index_t MPerXDL,
@@ -110,17 +105,18 @@ template <index_t NumDimG,
           index_t NXdlPerWave>
 struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
 {
-    static_assert(NumDimG > 0 && NumDimM > 0 && NumDimN > 0 && NumDimK > 0 && NumDimO > 0,
+    static_assert(NumDimG > 0,
                   "Number of dimension must be greater than 0");
     using DeviceOp = DeviceBatchedDropout;
+    static constexpr index_t Gemm1NPerBlock = 128;
     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};
     static constexpr auto I2 = Number<2>{};
     using Transform = TransformBatchedContractionContractionToBatchedGemmGemm<
-        Sequence<NumDimG, NumDimM, NumDimN, NumDimK, NumDimO>,
+        Sequence<NumDimG, 1, 1, 1, 1>, // NumDimM, NumDimN, NumDimK, NumDimO
         Sequence<MPerBlock, NPerBlock, KPerBlock, Gemm1NPerBlock>,
         GemmSpec,
         ASpec,
@@ -128,31 +124,19 @@ struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
         B1Spec,
         CSpec>;
-    /*
-        Descriptors for inputs:
-            Q, K, V, Y, dY, per-row softmax stats
-        Descriptors for outputs:
-            dQ, dK, dV
-    */
     // Q in Gemm A position
-    static auto MakeAGridDescriptor_AK0_M_AK1(const std::vector<index_t>& a_gs_ms_ks_lengths,
-                                               const std::vector<index_t>& a_gs_ms_ks_strides)
+    static auto MakeAGridDescriptor_AK0_M_AK1(const std::vector<index_t>& a_gs_m_k_lengths,
+                                               const std::vector<index_t>& a_gs_m_k_strides)
     {
         return Transform::MakeAGridDescriptor_AK0_M_AK1(
-            Transform::MakeAGridDescriptor_M_K(a_gs_ms_ks_lengths, a_gs_ms_ks_strides),
+            Transform::MakeAGridDescriptor_M_K(a_gs_m_k_lengths, a_gs_m_k_strides),
             Number<AK1>{});
     }
     // Z in Gemm0 C position
-    static auto MakeZGridDescriptor_M_N(const std::vector<index_t>& z_gs_ms_ns_lengths,
-                                        const std::vector<index_t>& z_gs_ms_ns_strides)
+    static auto MakeZGridDescriptor_M_N(const std::vector<index_t>& z_gs_m_n_lengths,
+                                        const std::vector<index_t>& z_gs_m_n_strides)
     {
-        return Transform::MakeCGridDescriptor_M_N(z_gs_ms_ns_lengths, z_gs_ms_ns_strides);
+        return Transform::MakeCGridDescriptor_M_N(z_gs_m_n_lengths, z_gs_m_n_strides);
     }
     using ZGridDesc_G_M_N = decltype(Transform::MakeCGridDescriptor_G_M_N({}, {}));
@@ -198,23 +182,18 @@ struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
     struct Argument : public BaseArgument
     {
         Argument(ZDataType* p_z_grid,
-                 const std::vector<index_t>& a_gs_ms_ks_lengths,
-                 const std::vector<index_t>& a_gs_ms_ks_strides,
-                 const std::vector<index_t>& b_gs_ns_ks_lengths,
-                 const std::vector<index_t>& b_gs_ns_ks_strides,
-                 const std::vector<index_t>& z_gs_ms_ns_lengths,
-                 const std::vector<index_t>& z_gs_ms_ns_strides,
+                 const std::vector<index_t>& b_gs_n_k_lengths,
+                 const std::vector<index_t>& b_gs_n_k_strides,
+                 const std::vector<index_t>& z_gs_m_n_lengths,
+                 const std::vector<index_t>& z_gs_m_n_strides,
                  std::tuple<unsigned long long, unsigned long long> seeds)
             : p_z_grid_{p_z_grid},
-              z_grid_desc_m_n_{MakeZGridDescriptor_M_N(z_gs_ms_ns_lengths, z_gs_ms_ns_strides)},
+              z_grid_desc_m_n_{MakeZGridDescriptor_M_N(z_gs_m_n_lengths, z_gs_m_n_strides)},
               k_grid_desc_n_k_{
-                  Transform::MakeB0GridDescriptor_N_K(b_gs_ns_ks_lengths, b_gs_ns_ks_strides)},
+                  Transform::MakeB0GridDescriptor_N_K(b_gs_n_k_lengths, b_gs_n_k_strides)},
              z_grid_desc_g_m_n_{
-                  Transform::MakeCGridDescriptor_G_M_N(z_gs_ms_ns_lengths, z_gs_ms_ns_strides)},
+                  Transform::MakeCGridDescriptor_G_M_N(z_gs_m_n_lengths, z_gs_m_n_strides)},
              block_2_ctile_map_{GridwiseDropout::MakeDefaultBlock2CTileMap(k_grid_desc_n_k_)},
-              raw_lengths_mz_nz_kz_gemm1nz_{a_gs_ms_ks_lengths[NumDimG + NumDimM - 1],
-                                            b_gs_ns_ks_lengths[NumDimG + NumDimN - 1],
-                                            b_gs_ns_ks_lengths[NumDimG + NumDimN + NumDimK - 1]},
              batch_count_{z_grid_desc_g_m_n_.GetLength(I0)}
         {
@@ -226,15 +205,22 @@ struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
             z_grid_desc_m0_n0_m1_n1_m2_n2_m3_m4_m5_n3_ =
                 GridwiseDropout::MakeCGridDescriptor_M0_N0_M1_N1_M2_N2_M3_M4_M5_N3(
                     z_grid_desc_m_n_);
+            // Print();
+            auto m_raw    = z_gs_m_n_lengths[NumDimG];
+            auto n_raw    = z_gs_m_n_lengths[NumDimG + 1];
+            m_raw_padded_ = GridwiseDropout::GetPaddedSize(m_raw);
+            n_raw_padded_ = GridwiseDropout::GetPaddedSize(n_raw);
+            std::vector<index_t> a_gs_m_k_strides(NumDimG + 2, 0);
+            std::vector<index_t> a_gs_m_k_lengths = z_gs_m_n_lengths;
+            a_gs_m_k_lengths[NumDimG + 1]         = 1;
             auto a_grid_desc_k0_m_k1 =
-                DeviceOp::MakeAGridDescriptor_AK0_M_AK1(a_gs_ms_ks_lengths, a_gs_ms_ks_strides);
+                DeviceOp::MakeAGridDescriptor_AK0_M_AK1(a_gs_m_k_lengths, a_gs_m_k_strides);
             num_gemm0_m_block_outer_loop_ = a_grid_desc_k0_m_k1.GetLength(I1) / MPerBlock;
-            m_raw_padded_ = GridwiseDropout::GetPaddedSize(raw_lengths_mz_nz_kz_gemm1nz_[0]);
-            n_raw_padded_ = GridwiseDropout::GetPaddedSize(raw_lengths_mz_nz_kz_gemm1nz_[1]);
         }
         // pointers
@@ -253,9 +239,6 @@ struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
         // block-to-c-tile map
         typename GridwiseDropout::DefaultBlock2CTileMap block_2_ctile_map_;
-        // For robust IsSupportedArgument() check
-        std::vector<index_t> raw_lengths_mz_nz_kz_gemm1nz_;
         index_t num_gemm0_m_block_outer_loop_;
         index_t batch_count_;
@@ -348,45 +331,39 @@ struct DeviceBatchedDropout : public ck::tensor_operation::device::BaseOperator
     }
     static auto MakeArgument(ZDataType* p_z,
-                             const std::vector<index_t>& a_gs_ms_ks_lengths,
-                             const std::vector<index_t>& a_gs_ms_ks_strides,
-                             const std::vector<index_t>& b_gs_ns_ks_lengths,
-                             const std::vector<index_t>& b_gs_ns_ks_strides,
-                             const std::vector<index_t>& z_gs_ms_ns_lengths,
-                             const std::vector<index_t>& z_gs_ms_ns_strides,
+                             const std::vector<index_t>& z_gs_m_n_lengths,
+                             const std::vector<index_t>& z_gs_m_n_strides,
                              std::tuple<unsigned long long, unsigned long long> seeds)
     {
+        std::vector<index_t> b_gs_n_k_strides(NumDimG + 2, 0);
+        std::vector<index_t> b_gs_n_k_lengths = z_gs_m_n_lengths;
+        b_gs_n_k_lengths[NumDimG]             = z_gs_m_n_lengths[NumDimG + 1];
+        b_gs_n_k_lengths[NumDimG + 1]         = 1;
         return Argument{p_z,
-                        a_gs_ms_ks_lengths,
-                        a_gs_ms_ks_strides,
-                        b_gs_ns_ks_lengths,
-                        b_gs_ns_ks_strides,
-                        z_gs_ms_ns_lengths,
-                        z_gs_ms_ns_strides,
+                        b_gs_n_k_lengths,
+                        b_gs_n_k_strides,
+                        z_gs_m_n_lengths,
+                        z_gs_m_n_strides,
                         seeds};
     }
     static auto MakeInvoker() { return Invoker{}; }
-    // polymorphic
+    // FIXME: constness
     std::unique_ptr<BaseArgument>
     MakeArgumentPointer(void* p_z,
-                        const std::vector<index_t>& a_gs_ms_ks_lengths,
-                        const std::vector<index_t>& a_gs_ms_ks_strides,
-                        const std::vector<index_t>& b_gs_ns_ks_lengths,
-                        const std::vector<index_t>& b_gs_ns_ks_strides,
-                        const std::vector<index_t>& z_gs_ms_ns_lengths,
-                        const std::vector<index_t>& z_gs_ms_ns_strides,
+                        const std::vector<index_t>& z_gs_m_n_lengths,
+                        const std::vector<index_t>& z_gs_m_n_strides,
                         std::tuple<unsigned long long, unsigned long long> seeds) // override
    {
+        std::vector<index_t> b_gs_n_k_strides(NumDimG + 2, 0);
+        std::vector<index_t> b_gs_n_k_lengths = z_gs_m_n_lengths;
+        b_gs_n_k_lengths[NumDimG]             = z_gs_m_n_lengths[NumDimG + 1];
+        b_gs_n_k_lengths[NumDimG + 1]         = 1;
        return std::make_unique<Argument>(static_cast<ZDataType*>(p_z),
-                                          a_gs_ms_ks_lengths,
-                                          a_gs_ms_ks_strides,
-                                          b_gs_ns_ks_lengths,
-                                          b_gs_ns_ks_strides,
-                                          z_gs_ms_ns_lengths,
-                                          z_gs_ms_ns_strides,
+                                          b_gs_n_k_lengths,
+                                          b_gs_n_k_strides,
+                                          z_gs_m_n_lengths,
+                                          z_gs_m_n_strides,
                                           seeds);
    }
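A worked illustration of the new argument derivation in MakeArgument / MakeArgumentPointer above: the caller now passes only the Z lengths and strides, and the device op fabricates a dummy B0 (key) descriptor from them. The concrete sizes and NumDimG == 2 below are hypothetical, chosen only to show the resulting values.

    // Mirrors the derivation added in this commit, with NumDimG == 2 and a
    // hypothetical Z of shape [G0, G1, M, N] = [4, 8, 512, 512].
    constexpr ck::index_t NumDimG = 2;
    std::vector<ck::index_t> z_gs_m_n_lengths{4, 8, 512, 512};
    std::vector<ck::index_t> b_gs_n_k_strides(NumDimG + 2, 0);     // all-zero strides
    std::vector<ck::index_t> b_gs_n_k_lengths = z_gs_m_n_lengths;  // start from Z lengths
    b_gs_n_k_lengths[NumDimG]     = z_gs_m_n_lengths[NumDimG + 1]; // N dimension -> 512
    b_gs_n_k_lengths[NumDimG + 1] = 1;                             // K dimension -> 1
    // b_gs_n_k_lengths is now {4, 8, 512, 1}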