gaoqiong / composable_kernel · Commit 3c835d76

move scalemask out of inter elementop

Authored Mar 24, 2023 by ltqin
Parent: 05fc2f8e

Showing 4 changed files with 22 additions and 34 deletions:

  client_example/08_fused_attention/fused_attention_no_lib.cpp  (+21, -1)
  include/ck/tensor_operation/gpu/element/element_wise_operation.hpp  (+0, -29)
  library/include/ck/library/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_multiple_d_softmax_gemm_permute_xdl_cshuffle_half_gmk_gnk_gno_gmo_instance.hpp  (+1, -3)
  library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_multiple_d_softmax_gemm_permute_xdl_cshuffle_gmk_gnk_gno_gmo_instance.cpp  (+0, -1)
client_example/08_fused_attention/fused_attention_no_lib.cpp

@@ -10,9 +10,29 @@
 #include "ck/tensor_operation/gpu/device/device_batched_gemm_softmax_gemm_permute.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 
+struct ScaleMask
+{
+    ScaleMask(float scale, float mask_filter_value)
+        : scale_(scale), mask_filter_value_(mask_filter_value)
+    {
+    }
+
+    // scale, masked
+    template <typename Y, typename X0, typename X1>
+    __host__ __device__ constexpr void operator()(Y& y, const X0& x, const X1& mask) const;
+
+    template <>
+    __host__ __device__ constexpr void
+    operator()(float& y, const float& x, const ck::half_t& mask) const
+    {
+        float filter_value = (mask < 1.0f ? mask_filter_value_ : 0.0f);
+        y                  = scale_ * x + filter_value;
+    }
+
+    const float scale_;
+    const float mask_filter_value_;
+};
+
 using AElementOp    = ck::tensor_operation::element_wise::PassThrough;
 using B0ElementOp   = ck::tensor_operation::element_wise::PassThrough;
-using Acc0ElementOp = ck::tensor_operation::element_wise::ScaleMask;
+using Acc0ElementOp = ScaleMask;
 using B1ElementOp   = ck::tensor_operation::element_wise::PassThrough;
 using CElementOp    = ck::tensor_operation::element_wise::PassThrough;
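For reference, a minimal host-side sketch (not part of the commit) of how the client-local ScaleMask above can be exercised. The function name scale_mask_example and the values alpha and filter are illustrative only, and the snippet assumes it lives in the same translation unit as the struct so that ck::half_t is already visible through the existing includes.

#include <cstdio>

// Illustrative smoke test for the client-local ScaleMask (hypothetical names/values).
inline void scale_mask_example()
{
    const float alpha  = 0.125f; // hypothetical softmax scaling factor
    const float filter = -1e20f; // hypothetical value added to masked-out logits

    ScaleMask op(alpha, filter);

    float kept   = 0.0f;
    float masked = 0.0f;
    op(kept, 2.0f, static_cast<ck::half_t>(1.0f));   // mask >= 1: kept   = alpha * 2.0f
    op(masked, 2.0f, static_cast<ck::half_t>(0.0f)); // mask <  1: masked = alpha * 2.0f + filter

    std::printf("kept = %f, masked = %f\n", kept, masked);
}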
include/ck/tensor_operation/gpu/element/element_wise_operation.hpp

@@ -389,35 +389,6 @@ struct UnaryTypeConvert<ck::bhalf_t, float>
     }
 };
 
-struct ScaleMask
-{
-    ScaleMask(float scale, float mask_filter_value)
-        : scale_(scale), mask_filter_value_(mask_filter_value)
-    {
-    }
-
-    // scale, masked
-    template <typename Y, typename X0, typename X1>
-    __host__ __device__ constexpr void operator()(Y& y, const X0& x, const X1& mask) const;
-
-    template <>
-    __host__ __device__ constexpr void
-    operator()(float& y, const float& x, const int16_t& mask) const
-    {
-        float filter_value = (mask == 1 ? 0.0f : mask_filter_value_);
-        y                  = scale_ * x + filter_value;
-    }
-
-    template <>
-    __host__ __device__ constexpr void
-    operator()(float& y, const float& x, const half_t& mask) const
-    {
-        float filter_value = (mask < 1.0f ? mask_filter_value_ : 0.0f);
-        y                  = scale_ * x + filter_value;
-    }
-
-    const float scale_;
-    const float mask_filter_value_;
-};
-
 struct ScaleBiasMask
 {
     ScaleBiasMask(float scale, float mask_filter_value)
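Both specializations of the removed header-side ScaleMask follow the same convention: a mask value of 1 keeps the logit, anything else adds mask_filter_value_. The client-side copy above retains only the ck::half_t overload. A hedged restatement of that rule as a free function (illustration only, not code from the commit; scale_and_mask and its parameter names are made up):

// Illustration only: the masking rule shared by the int16_t and half_t overloads.
// scale and mask_filter_value correspond to the functor's scale_ / mask_filter_value_ members.
inline float scale_and_mask(float scale, float mask_filter_value, float x, bool keep)
{
    const float filter_value = keep ? 0.0f : mask_filter_value;
    return scale * x + filter_value;
}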
library/include/ck/library/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_multiple_d_softmax_gemm_permute_xdl_cshuffle_gmk_gnk_gno_gmo_instance.hpp
→ library/include/ck/library/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_multiple_d_softmax_gemm_permute_xdl_cshuffle_half_gmk_gnk_gno_gmo_instance.hpp

@@ -20,9 +20,7 @@ using F32 = float;
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
 
 using PassThrough = ck::tensor_operation::element_wise::PassThrough;
-using ScaleMask     = ck::tensor_operation::element_wise::ScaleMask;
-using ScaleBiasMask = ck::tensor_operation::element_wise::ScaleBiasMask;
 
 static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
 static constexpr auto GemmPadded  = ck::tensor_operation::device::GemmSpecialization::MNKOPadding;
library/src/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute/device_batched_gemm_multiple_d_softmax_gemm_permute_xdl_cshuffle_gmk_gnk_gno_gmo_instance.cpp

@@ -18,7 +18,6 @@ using F16 = ck::half_t;
 using F32 = float;
 
 using PassThrough   = ck::tensor_operation::element_wise::PassThrough;
-using ScaleMask     = ck::tensor_operation::element_wise::ScaleMask;
 using ScaleBiasMask = ck::tensor_operation::element_wise::ScaleBiasMask;
 
 // f16 ScaleBiasMask masking