gaoqiong/composable_kernel
Commit 5fc894f5, authored Sep 06, 2023 by guangzlu (parent 5bc83619)

    added instances for qloop light v2
Showing 4 changed files with 502 additions and 1 deletion:
  +189 -0  library/include/ck/library/tensor_operation_instance/gpu/batched_mha_bwd_qloop_light_v2.hpp
  +2   -1  library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/CMakeLists.txt
  +155 -0  library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_mha_bwd_qloop_light_v2_bf16_bf16_instance.cpp
  +156 -0  library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_mha_bwd_qloop_light_v2_f16_f16_instance.cpp
library/include/ck/library/tensor_operation_instance/gpu/batched_mha_bwd_qloop_light_v2.hpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <vector>
#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_softmax_gemm_permute.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_batched_mha_bwd_qloop_light_v2_casual_f16_f16_instances(
    std::vector<std::unique_ptr<DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        F16, F16, unsigned short, F32, F32,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpecialization::MaskUpperTriangleFromTopLeft>>>& instances);

void add_device_batched_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances(
    std::vector<std::unique_ptr<DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        F16, F16, unsigned short, F32, F32,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpecialization::MaskDisabled>>>& instances);

void add_device_batched_mha_bwd_qloop_light_v2_casual_bf16_bf16_instances(
    std::vector<std::unique_ptr<DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        BF16, BF16, unsigned short, F32, F32,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpecialization::MaskUpperTriangleFromTopLeft>>>& instances);

void add_device_batched_mha_bwd_qloop_light_v2_noncasual_bf16_bf16_instances(
    std::vector<std::unique_ptr<DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        BF16, BF16, unsigned short, F32, F32,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpecialization::MaskDisabled>>>& instances);

template <typename InputDataType,
          typename OutputDataType,
          typename ZDataType,
          typename LSEDataType,
          typename DDataType,
          MaskingSpecialization MaskingSpec>
struct DeviceOperationInstanceFactory<
    ck::tensor_operation::device::DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        InputDataType, OutputDataType, ZDataType, LSEDataType, DDataType,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpec>>
{
    using DeviceOp = DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        InputDataType, OutputDataType, ZDataType, LSEDataType, DDataType,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpec>;

    static auto GetInstances()
    {
        std::vector<std::unique_ptr<DeviceOp>> op_ptrs;

        if constexpr(is_same_v<InputDataType, half_t> && is_same_v<OutputDataType, half_t> &&
                     is_same_v<ZDataType, unsigned short> && is_same_v<LSEDataType, float> &&
                     is_same_v<DDataType, float>)
        {
            if constexpr(MaskingSpec == MaskingSpecialization::MaskUpperTriangleFromTopLeft)
            {
                add_device_batched_mha_bwd_qloop_light_v2_casual_f16_f16_instances(op_ptrs);
            }
            else if(MaskingSpec == MaskingSpecialization::MaskDisabled)
            {
                add_device_batched_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances(op_ptrs);
            }
        }
        else if constexpr(is_same_v<InputDataType, BF16> && is_same_v<OutputDataType, BF16> &&
                          is_same_v<ZDataType, unsigned short> && is_same_v<LSEDataType, float> &&
                          is_same_v<DDataType, float>)
        {
            if constexpr(MaskingSpec == MaskingSpecialization::MaskUpperTriangleFromTopLeft)
            {
                add_device_batched_mha_bwd_qloop_light_v2_casual_bf16_bf16_instances(op_ptrs);
            }
            else if(MaskingSpec == MaskingSpecialization::MaskDisabled)
            {
                add_device_batched_mha_bwd_qloop_light_v2_noncasual_bf16_bf16_instances(op_ptrs);
            }
        }

        return op_ptrs;
    }
};

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
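Before moving on to the build changes, here is a minimal usage sketch (not part of this commit) of how the factory specialization above is typically queried from client code. It instantiates the factory for the f16 / MaskDisabled configuration and collects the registered instances. The fully qualified namespaces for PassThrough, Scale, and MaskingSpecialization are assumptions inferred from the headers included above; argument construction and kernel launch are omitted.

// Usage sketch only. Assumes the element-wise operators live in
// ck::tensor_operation::element_wise and the masking enum in
// ck::tensor_operation::device, as suggested by the includes in the new header.
#include <memory>
#include <vector>

#include "ck/library/tensor_operation_instance/gpu/batched_mha_bwd_qloop_light_v2.hpp"

int main()
{
    using PassThrough = ck::tensor_operation::element_wise::PassThrough;
    using Scale       = ck::tensor_operation::element_wise::Scale;
    using Masking     = ck::tensor_operation::device::MaskingSpecialization;

    // Concrete device op matching the f16 / MaskDisabled branch of GetInstances().
    using DeviceOp = ck::tensor_operation::device::
        DeviceBatchedMultiheadAttentionBackwardQloopLightV2<2, 1, 1, 1, 1,
                                                            ck::half_t,     // InputDataType (F16)
                                                            ck::half_t,     // OutputDataType (F16)
                                                            unsigned short, // ZDataType
                                                            float,          // LSEDataType (F32)
                                                            float,          // DDataType (F32)
                                                            ck::Tuple<>,
                                                            ck::Tuple<>,
                                                            PassThrough,
                                                            PassThrough,
                                                            Scale,
                                                            PassThrough,
                                                            PassThrough,
                                                            Masking::MaskDisabled>;

    // GetInstances() returns every instance registered by
    // add_device_batched_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances.
    std::vector<std::unique_ptr<DeviceOp>> op_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    // A real caller would build the op's Argument, check IsSupportedArgument(),
    // and run the chosen instance; that part is omitted here.
    return op_ptrs.empty() ? 1 : 0;
}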
library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/CMakeLists.txt
@@ -7,6 +7,7 @@ add_instance_library(device_batched_gemm_softmax_gemm_permute_instance
     device_batched_mha_bwd_qloop_f16_f16_gmk_gnk_gno_gmo_instance.cpp
     device_batched_mha_bwd_qloop_light_v1_bf16_bf16_instance.cpp
     device_batched_mha_bwd_qloop_light_v1_f16_f16_instance.cpp
+    device_batched_mha_bwd_qloop_light_v2_bf16_bf16_instance.cpp
+    device_batched_mha_bwd_qloop_light_v2_f16_f16_instance.cpp
 )
library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_mha_bwd_qloop_light_v2_bf16_bf16_instance.cpp (new file, mode 100644)
(diff collapsed, not shown)
library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_mha_bwd_qloop_light_v2_f16_f16_instance.cpp (new file, mode 100644)
(diff collapsed, not shown)
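The two new instance translation units are collapsed in this view. Judging from the declarations in the header above, each one presumably defines its pair of add_device_... functions by appending concrete tuned kernel configurations to the caller's vector. Below is a hypothetical sketch of that structure for the f16 / MaskDisabled case only; it assumes CK's usual add_device_operation_instances helper and its include path, and uses an empty placeholder tuple where the real file lists its concrete kernel configurations (which account for the roughly 155 added lines per file).

// Hypothetical sketch; the actual instance lists in this commit are not
// visible in the collapsed diffs above.
#include <memory>
#include <tuple>
#include <vector>

#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
#include "ck/library/tensor_operation_instance/gpu/batched_mha_bwd_qloop_light_v2.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// Placeholder: the real file enumerates concrete qloop-light-v2 backward-MHA
// kernel configurations here.
using device_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances = std::tuple<>;

void add_device_batched_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances(
    std::vector<std::unique_ptr<DeviceBatchedMultiheadAttentionBackwardQloopLightV2<
        2, 1, 1, 1, 1,
        F16, F16, unsigned short, F32, F32,
        ck::Tuple<>, ck::Tuple<>,
        PassThrough, PassThrough, Scale, PassThrough, PassThrough,
        MaskingSpecialization::MaskDisabled>>>& instances)
{
    // add_device_operation_instances appends each element of the tuple to the
    // caller's vector of type-erased device-op pointers.
    add_device_operation_instances(
        instances, device_mha_bwd_qloop_light_v2_noncasual_f16_f16_instances{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck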