# gaoqiong / composable_kernel · Commit bd0f0686

"merge develop" — authored Jul 09, 2022 by Jing Zhang
Parents: e9b1000f, 63914743
Changes: 382 files in the full commit; this page shows 20 changed files with 2737 additions and 74 deletions (+2737, -74).
## Changed files (this page)

| File | +/- |
| --- | --- |
| example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_fp16.cpp | +15 / -15 |
| example/21_gemm_layernorm/CMakeLists.txt | +1 / -0 |
| example/21_gemm_layernorm/gemm_bias_relu_add_layernorm_xdl_fp16.cpp | +10 / -10 |
| example/21_gemm_layernorm/gemm_xdl_layernorm_single_kernel_fp16.cpp | +289 / -0 |
| example/23_softmax/softmax_blockwise.cpp | +6 / -3 |
| example/24_batched_gemm_c_permute/CMakeLists.txt | +2 / -0 |
| example/24_batched_gemm_c_permute/batched_gemm_c_permute_xdl_fp16.cpp | +245 / -0 |
| example/25_gemm_bias_c_permute/CMakeLists.txt | +1 / -0 |
| example/25_gemm_bias_c_permute/gemm_bias_c_permute_xdl_fp16.cpp | +284 / -0 |
| example/26_contraction/CMakeLists.txt | +2 / -0 |
| example/26_contraction/README.md | +20 / -0 |
| example/26_contraction/contraction_bilinear_xdl_fp32.cpp | +444 / -0 |
| example/26_contraction/contraction_scale_xdl_fp32.cpp | +424 / -0 |
| example/CMakeLists.txt | +4 / -1 |
| include/ck/ck.hpp | +5 / -10 |
| include/ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp | +1 / -1 |
| include/ck/tensor_operation/gpu/device/device_batched_gemm.hpp | +40 / -17 |
| include/ck/tensor_operation/gpu/device/device_batched_gemm_c_permute.hpp | +48 / -0 |
| include/ck/tensor_operation/gpu/device/device_batched_gemm_c_permute_xdl.hpp | +860 / -0 |
| include/ck/tensor_operation/gpu/device/device_batched_gemm_xdl.hpp | +36 / -17 |
### example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_fp16.cpp

```diff
@@ -156,16 +156,16 @@ int main(int argc, char* argv[])
         d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{0.0, 1.0});
     }

-    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
-    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
-    DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpace());
-    DeviceMem d1_m_n_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpace());
-    DeviceMem e_m_n_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpace());
+    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
+    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
+    DeviceMem d0_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpace());
+    DeviceMem d1_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpace());
+    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpace());

-    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
-    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
-    d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
-    d1_m_n_device_buf.ToDevice(d1_m_n.mData.data());
+    a_device_buf.ToDevice(a_m_k.mData.data());
+    b_device_buf.ToDevice(b_k_n.mData.data());
+    d0_device_buf.ToDevice(d0_m_n.mData.data());
+    d1_device_buf.ToDevice(d1_m_n.mData.data());

     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -175,11 +175,11 @@ int main(int argc, char* argv[])
     auto device_op = DeviceOpInstance{};
     auto invoker   = device_op.MakeInvoker();
     auto argument  =
-        device_op.MakeArgument(a_m_k_device_buf.GetDeviceBuffer(),
-                               b_k_n_device_buf.GetDeviceBuffer(),
-                               std::array<const void*, 2>{d0_m_n_device_buf.GetDeviceBuffer(),
-                                                          d1_m_n_device_buf.GetDeviceBuffer()},
-                               e_m_n_device_buf.GetDeviceBuffer(),
+        device_op.MakeArgument(a_device_buf.GetDeviceBuffer(),
+                               b_device_buf.GetDeviceBuffer(),
+                               std::array<const void*, 2>{d0_device_buf.GetDeviceBuffer(),
+                                                          d1_device_buf.GetDeviceBuffer()},
+                               e_device_buf.GetDeviceBuffer(),
                                M,
                                N,
                                K,
@@ -239,7 +239,7 @@ int main(int argc, char* argv[])
         }
     }

-    e_m_n_device_buf.FromDevice(e_m_n_device_result.mData.data());
+    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

     return ck::utils::check_err(e_m_n_device_result.mData, e_m_n_host_result.mData) ? 0 : 1;
 }
```
### example/21_gemm_layernorm/CMakeLists.txt

```diff
 add_example_executable(example_gemm_bias_relu_add_layernorm_xdl_fp16 gemm_bias_relu_add_layernorm_xdl_fp16.cpp)
 add_example_executable(example_gemm_layernorm_xdl_fp16 gemm_layernorm_xdl_fp16.cpp)
+add_example_executable(example_gemm_xdl_layernorm_single_kernel_fp16 gemm_xdl_layernorm_single_kernel_fp16.cpp)
```
### example/21_gemm_layernorm/gemm_bias_relu_add_layernorm_xdl_fp16.cpp

```diff
@@ -166,15 +166,15 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
     for(int m = 0; m < M; ++m)
         for(int n = 0; n < N; ++n)
         {
-            AccDataType acc = static_cast<AccDataType>(c_m_n(m, n)) +
-                              static_cast<AccDataType>(bias_n(n));
+            AccDataType acc = ck::type_convert<AccDataType>(c_m_n(m, n)) +
+                              ck::type_convert<AccDataType>(bias_n(n));

-            AccDataType c1 = static_cast<AccDataType>(c1_m_n(m, n));
+            AccDataType c1 = ck::type_convert<AccDataType>(c1_m_n(m, n));
             c_element_op(acc, acc);
             c1_element_op(c1, c1);
             acc += c1;
-            c_m_n(m, n) = static_cast<CDataType>(acc);
+            c_m_n(m, n) = ck::type_convert<CDataType>(acc);
         }

     // reduce_mean and reduce_square_mean
@@ -208,12 +208,12 @@ void host_gemm_layernorm(Tensor<LayerNormOutDataType>& out_m_n,
         {
             AccDataType out_acc = 0;
             layerNormInst(out_acc,
-                          static_cast<AccDataType>(c_m_n(m, n)),
-                          static_cast<AccDataType>(mean_m(m)),
-                          static_cast<AccDataType>(meanSquare_m(m)),
-                          static_cast<AccDataType>(gamma_n(n)),
-                          static_cast<AccDataType>(beta_n(n)));
+                          ck::type_convert<AccDataType>(c_m_n(m, n)),
+                          ck::type_convert<AccDataType>(mean_m(m)),
+                          ck::type_convert<AccDataType>(meanSquare_m(m)),
+                          ck::type_convert<AccDataType>(gamma_n(n)),
+                          ck::type_convert<AccDataType>(beta_n(n)));
-            out_m_n(m, n) = static_cast<ReduceDataType>(out_acc);
+            out_m_n(m, n) = ck::type_convert<ReduceDataType>(out_acc);
         }
     }
 }
```
### example/21_gemm_layernorm/gemm_xdl_layernorm_single_kernel_fp16.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>

#include "ck/ck.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/host_tensor/device_memory.hpp"
#include "ck/library/host_tensor/host_tensor.hpp"
#include "ck/library/host_tensor/host_tensor_generator.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_xdl_layernorm_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/utility/reduction_operator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm_layernorm.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"

// This example demonstrates a single kernel that runs the GEMM layer and layernorm in one fused kernel.
//
// The GEMM + Layernorm implementation is a specialized kernel which allows fusing both layers
// together, given the condition that the GEMM extent N of MNK is spanned by a single workgroup. For
// example, a kernel configured with NPerBlock = 128 can operate on all GEMM sizes with N <= 128.
//
// D = Layernorm(acc_element_op(A * B + broadcast(bias)) + add) * broadcast(gamma) + broadcast(beta)

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using ADataType        = F16;
using BDataType        = F16;
using CDataType        = F16;
using C0DataType       = F16;
using AccDataType      = F32;
using CShuffleDataType = F16;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

struct Relu
{
    template <typename OutT, typename InT>
    __host__ __device__ void operator()(OutT& y, const InT& x) const
    {
        y = x > 0 ? x : 0;
    }
};

using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
// Elementwise operation that operates on the output of matrix multiplication
// i.e., AccElementOp(A * B + bias)
using AccElementOp = Relu;
// Elementwise operation that operates on the output of layer normalization
using CElementOp = Relu;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

// clang-format off
using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmLayerNorm_Xdl_CShuffle
//######| ALayout| BLayout| CLayout| AData| BData| CData| C0Data| GemmAcc| CShuffle| ReduceAcc| A| B| Acc| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| CReduce| CReduceThreadCopy|
//######| | | | Type| Type| Type| Type| DataType| DataType| DataType| Elementwise| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| ExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| ExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MPerBlock| ScalarPerVector| ThreadClusterLengths| SrcDstScalarPerVector|
//######| | | | | | | | | | | Operation| Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NPerBlock| _NPerBlock| _MPerBlock_NPerBlock| _NPerBlock|
    <Row, Col, Row, ADataType, BDataType, CDataType, C0DataType, AccDataType, CShuffleDataType, AccDataType,
     AElementOp, BElementOp, AccElementOp, CElementOp, GemmDefault,
     1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
     S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
     S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
     1, 2, S<1, 32, 1, 8>, 8, S<64, 4>, 4>;
// clang-format on

using ReferenceInstance = ck::tensor_operation::host::ReferenceGemmLayernorm<ADataType,
                                                                             BDataType,
                                                                             CDataType,
                                                                             C0DataType,
                                                                             AccDataType,
                                                                             AElementOp,
                                                                             BElementOp,
                                                                             AccElementOp,
                                                                             CElementOp>;

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 128;
    ck::index_t K = 4096;

    ck::index_t StrideA = 4096;
    ck::index_t StrideB = 4096;
    ck::index_t StrideC = 128;

    if(argc == 1)
    {
        // do nothing
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 10)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideC = std::stoi(argv[9]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<AccDataType> acc_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<C0DataType> c0_n_bias(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
    Tensor<C0DataType> c0_m_n_add(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<C0DataType> c0_n_gamma(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
    Tensor<C0DataType> c0_n_beta(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
    std::cout << "c0_n_bias: " << c0_n_bias.mDesc << std::endl;
    std::cout << "c0_m_n_add: " << c0_m_n_add.mDesc << std::endl;
    std::cout << "c0_n_gamma: " << c0_n_gamma.mDesc << std::endl;
    std::cout << "c0_n_beta: " << c0_n_beta.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        break;
    case 2:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_Sequential<0>{});
        b_k_n.GenerateTensorValue(GeneratorTensor_Sequential<1>{});
    }

    c0_n_bias.GenerateTensorValue(GeneratorTensor_2<C0DataType>{-5, 5});
    c0_m_n_add.GenerateTensorValue(GeneratorTensor_2<C0DataType>{-5, 5});
    c0_n_gamma.GenerateTensorValue(GeneratorTensor_2<C0DataType>{0, 2});
    c0_n_beta.GenerateTensorValue(GeneratorTensor_2<C0DataType>{0, 5});
    c_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<CDataType>{0});
    acc_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0});

    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
    DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());
    DeviceMem c0_bias_buf(sizeof(C0DataType) * c0_n_bias.mDesc.GetElementSpace());
    DeviceMem c0_add_buf(sizeof(C0DataType) * c0_m_n_add.mDesc.GetElementSpace());
    DeviceMem c0_gamma_buf(sizeof(C0DataType) * c0_n_gamma.mDesc.GetElementSpace());
    DeviceMem c0_beta_buf(sizeof(C0DataType) * c0_n_beta.mDesc.GetElementSpace());

    a_device_buf.ToDevice(a_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    c0_bias_buf.ToDevice(c0_n_bias.mData.data());
    c0_add_buf.ToDevice(c0_m_n_add.mData.data());
    c0_gamma_buf.ToDevice(c0_n_gamma.mData.data());
    c0_beta_buf.ToDevice(c0_n_beta.mData.data());

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto acc_element_op = AccElementOp{};
    auto c_element_op   = CElementOp{};

    // do GEMM
    auto gemm     = DeviceGemmInstance{};
    auto invoker  = gemm.MakeInvoker();
    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
                                      static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
                                      static_cast<C0DataType*>(c0_add_buf.GetDeviceBuffer()),
                                      static_cast<C0DataType*>(c0_bias_buf.GetDeviceBuffer()),
                                      static_cast<C0DataType*>(c0_gamma_buf.GetDeviceBuffer()),
                                      static_cast<C0DataType*>(c0_beta_buf.GetDeviceBuffer()),
                                      M,
                                      N,
                                      K,
                                      StrideA,
                                      StrideB,
                                      StrideC,
                                      a_element_op,
                                      b_element_op,
                                      acc_element_op,
                                      c_element_op);

    if(!gemm.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    // extra 6MN flops due to: bias + add + gamma + beta + norm_sub + norm_div,
    // excluding reduction steps
    std::size_t flop = std::size_t(2) * M * N * K + std::size_t(6) * M * N;
    // extra MN and 3N due to c0_add (MxN), bias (1xN), gamma (1xN), beta (1xN)
    std::size_t bytes = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
                        sizeof(CDataType) * 2 * M * N + sizeof(C0DataType) * 3 * N;

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = bytes / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
              << " GB/s, " << gemm.GetTypeString() << std::endl;

    bool pass = true;

    if(do_verification)
    {
        c_device_buf.FromDevice(c_m_n_device_result.mData.data());

        auto ref_gemm     = ReferenceInstance{};
        auto ref_invoker  = ref_gemm.MakeInvoker();
        auto ref_argument = ref_gemm.MakeArgument(a_m_k,
                                                  b_k_n,
                                                  c_m_n_host_result,
                                                  c0_n_bias,
                                                  c0_m_n_add,
                                                  c0_n_gamma,
                                                  c0_n_beta,
                                                  a_element_op,
                                                  b_element_op,
                                                  acc_element_op,
                                                  c_element_op);
        ref_invoker.Run(ref_argument);

        if constexpr(std::is_same<CShuffleDataType, F32>::value)
        {
            pass &= ck::utils::check_err(
                c_m_n_device_result.mData, c_m_n_host_result.mData, "Error: Incorrect results c");
        }
        else if constexpr(std::is_same<CShuffleDataType, F16>::value)
        {
            pass &= ck::utils::check_err(c_m_n_device_result.mData,
                                         c_m_n_host_result.mData,
                                         "Error: Incorrect results c",
                                         1e-2,
                                         1e-2);
        }
    }

    return pass ? 0 : 1;
}
```
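As a reading aid for the fused formula in the header comment above, here is a minimal host-side sketch of what the kernel computes, written against plain `float` buffers. The function name `gemm_layernorm_host`, the `eps` term, and the dense row-major output layout are illustrative assumptions, not part of the CK API (the actual reference lives in `reference_gemm_layernorm.hpp`).

```cpp
#include <cmath>
#include <vector>

// D = Layernorm(acc_op(A*B + bias) + add) * gamma + beta, normalized over the N axis.
// A is MxK row-major; B is KxN column-major (element (k, n) at b[n*K + k]);
// D is assumed dense MxN row-major. Sketch only, not CK code.
void gemm_layernorm_host(const std::vector<float>& a,
                         const std::vector<float>& b,
                         const std::vector<float>& bias,  // length N
                         const std::vector<float>& add,   // MxN
                         const std::vector<float>& gamma, // length N
                         const std::vector<float>& beta,  // length N
                         std::vector<float>& d,           // MxN
                         int M, int N, int K, float eps = 1e-5f)
{
    std::vector<float> acc(N);
    for(int m = 0; m < M; ++m)
    {
        // GEMM row + bias broadcast + acc_element_op (Relu in the example) + residual add
        for(int n = 0; n < N; ++n)
        {
            float sum = 0.f;
            for(int k = 0; k < K; ++k)
                sum += a[m * K + k] * b[n * K + k];
            sum += bias[n];
            sum    = sum > 0 ? sum : 0;
            acc[n] = sum + add[m * N + n];
        }
        // layernorm over the row; this per-row reduction is why N must fit in one workgroup
        float mean = 0.f, mean_sq = 0.f;
        for(int n = 0; n < N; ++n)
        {
            mean += acc[n];
            mean_sq += acc[n] * acc[n];
        }
        mean /= N;
        mean_sq /= N;
        const float inv_std = 1.f / std::sqrt(mean_sq - mean * mean + eps); // Var = E[x^2] - E[x]^2
        for(int n = 0; n < N; ++n)
            d[m * N + n] = (acc[n] - mean) * inv_std * gamma[n] + beta[n];
        // (the example additionally applies c_element_op = Relu to the layernorm output)
    }
}
```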
### example/23_softmax/softmax_blockwise.cpp

```diff
@@ -150,6 +150,9 @@ int main(int argc, char* argv[])
     AccDataType alpha = args.scales[0];
     AccDataType beta  = args.scales[1];

+    std::cout << "in: " << in.mDesc << std::endl;
+    std::cout << "out: " << out.mDesc << std::endl;
+
     std::size_t num_thread = 1;

     if(args.do_verification)
@@ -195,7 +198,7 @@ int main(int argc, char* argv[])
         using ReferenceInstance =
             tensor_operation::host::ReferenceSoftmax<InDataType, OutDataType, AccDataType>;
         ReferenceInstance ref;
-        auto ref_arg = ref.MakeArgument(in, out_ref, alpha, beta, Rank, reduceDims);
+        auto ref_arg = ref.MakeArgument(in, out_ref, alpha, beta, reduceDims);
         auto invoker = ref.MakeInvoker();
         invoker.Run(ref_arg);
         // LogRangeAsType<float>(std::cout << "tensor out_ref: ", out_ref.mData, ",") << std::endl;
@@ -212,8 +215,8 @@ int main(int argc, char* argv[])
     auto argument_ptr = device_instance.MakeArgumentPointer(i_inLengths,
                                                             i_inStrides,
                                                             reduceDims,
-                                                            alpha,
-                                                            beta,
+                                                            &alpha,
+                                                            &beta,
                                                             in_dev.GetDeviceBuffer(),
                                                             out_dev.GetDeviceBuffer());
```
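The last hunk switches the scaling factors from by-value scalars to pointers. One plausible motivation (my assumption; the diff itself does not say) is that an opaque pointer lets a type-erased argument factory accept scalars of any accumulator type. A tiny self-contained illustration of that pattern, not the CK definition:

```cpp
#include <cstdio>

// Reads type-erased host scalars the way a type-erased argument factory could,
// then applies out = alpha * x + beta * out. Illustrative only.
void apply_scales(const void* p_alpha, const void* p_beta, float* out, const float* x, int n)
{
    const float alpha = *static_cast<const float*>(p_alpha);
    const float beta  = *static_cast<const float*>(p_beta);
    for(int i = 0; i < n; ++i)
        out[i] = alpha * x[i] + beta * out[i];
}

int main()
{
    float x[4] = {1, 2, 3, 4}, out[4] = {0, 0, 0, 0};
    float alpha = 2.0f, beta = 0.0f;
    apply_scales(&alpha, &beta, out, x, 4);
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // prints: 2 4 6 8
    return 0;
}
```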
### example/24_batched_gemm_c_permute/CMakeLists.txt (new file, mode 100644)

```cmake
add_example_executable(example_batched_gemm_c_permute_xdl_fp16 batched_gemm_c_permute_xdl_fp16.cpp)
```
### example/24_batched_gemm_c_permute/batched_gemm_c_permute_xdl_fp16.cpp (new file, mode 100644)

```cpp
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_c_permute_xdl.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/host_tensor/device_memory.hpp"
#include "ck/library/host_tensor/host_tensor.hpp"
#include "ck/library/host_tensor/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

using ADataType   = ck::half_t;
using BDataType   = ck::half_t;
using CDataType   = ck::half_t;
using AccDataType = float;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::PassThrough;

// static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
// static constexpr auto MNPadding = ck::tensor_operation::device::GemmSpecialization::MNPadding;
static constexpr auto MNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

// clang-format off
using DeviceGemmInstance = ck::tensor_operation::device::DeviceBatchedGemmCPermuteXdl
//######| ALayout| BLayout| AData| BData| CData| AccData| A| B| C| GEMM| Num| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//######| | | Type| Type| Type| Type| Elementwise| Elementwise| Elementwise|Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//######| | | | | | | Operation| Operation| Operation| | | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
// < Row, Col, F16, F16, F16, F32, PassThrough, PassThrough, PassThrough, MNPadding, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, 1, 1, S<1, 32, 1, 8>, 8>;
    <Row, Col, F16, F16, F16, F32, PassThrough, PassThrough, PassThrough, MNKPadding,
     1, 256, 128, 64, 32, 8, 8, 32, 32, 2, 1,
     S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true,
     S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true,
     1, 1, S<1, 32, 1, 8>, 8>;
// clang-format on

using ReferenceBatchedGemmInstance = ck::tensor_operation::host::
    ReferenceBatchedGemm<ADataType, BDataType, CDataType, AElementOp, BElementOp, CElementOp>;

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    const int M = 88;
    const int N = 64;
    const int K = 88;

    const int stride_A = K;
    const int stride_B = K;

    const int G0 = 1024;
    const int G1 = 10;

    const int batch_count = G0 * G1;

    // output layout - [G0, M, G1, N]
    const int stride_G0 = M * G1 * N;
    const int stride_G1 = N;
    const int stride_M  = G1 * N;
    const int stride_N  = 1;

    if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        exit(0);
    }

    // GEMM shape
    ck::tensor_operation::device::BatchedGemmCPermuteDesc batched_gemm_c_permute_desc{
        G0, G1, M, N, stride_G0, stride_G1, stride_M, stride_N};

    auto f_host_tensor_descriptor = [](std::size_t batch_count_,
                                       std::size_t row,
                                       std::size_t col,
                                       std::size_t stride,
                                       auto layout) {
        if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
        {
            return HostTensorDescriptor(std::vector<std::size_t>({batch_count_, row, col}),
                                        std::vector<std::size_t>({row * stride, stride, 1}));
        }
        else
        {
            return HostTensorDescriptor(std::vector<std::size_t>({batch_count_, row, col}),
                                        std::vector<std::size_t>({col * stride, 1, stride}));
        }
    };

    Tensor<ADataType> a_g_m_k(f_host_tensor_descriptor(batch_count, M, K, stride_A, ALayout{}));
    Tensor<BDataType> b_g_k_n(f_host_tensor_descriptor(batch_count, K, N, stride_B, BLayout{}));

    auto f_host_c_tensor_descriptor = [](std::size_t G0_,
                                         std::size_t G1_,
                                         std::size_t M_,
                                         std::size_t N_,
                                         std::size_t stride_G0_,
                                         std::size_t stride_G1_,
                                         std::size_t stride_M_,
                                         std::size_t stride_N_) {
        return HostTensorDescriptor(
            std::vector<std::size_t>({G0_, G1_, M_, N_}),
            std::vector<std::size_t>({stride_G0_, stride_G1_, stride_M_, stride_N_}));
    };

    Tensor<CDataType> c_g0_g1_m_n_host_result(
        f_host_c_tensor_descriptor(G0, G1, M, N, stride_G0, stride_G1, stride_M, stride_N));
    Tensor<CDataType> c_g0_g1_m_n_device_result(
        f_host_c_tensor_descriptor(G0, G1, M, N, stride_G0, stride_G1, stride_M, stride_N));

    std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
    std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
    std::cout << "c_g0_g1_m_n: " << c_g0_g1_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        break;
    default:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        break;
    }

    DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpace());
    DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n.mDesc.GetElementSpace());
    DeviceMem c_device_buf(sizeof(CDataType) * c_g0_g1_m_n_device_result.mDesc.GetElementSpace());

    a_device_buf.ToDevice(a_g_m_k.mData.data());
    b_device_buf.ToDevice(b_g_k_n.mData.data());

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    auto gemm    = DeviceGemmInstance{};
    auto invoker = gemm.MakeInvoker();

    // do GEMM
    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
                                      static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
                                      M,
                                      N,
                                      K,
                                      stride_A,
                                      stride_B,
                                      batched_gemm_c_permute_desc,
                                      a_element_op,
                                      b_element_op,
                                      c_element_op,
                                      batch_count);

    if(!gemm.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop      = std::size_t(2) * batch_count * M * N * K;
    std::size_t num_btype = sizeof(ADataType) * batch_count * M * K +
                            sizeof(BDataType) * batch_count * K * N +
                            sizeof(CDataType) * batch_count * M * N;

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
              << " GB/s, " << gemm.GetTypeString() << std::endl;

    bool pass = true;
    if(do_verification)
    {
        c_device_buf.FromDevice(c_g0_g1_m_n_device_result.mData.data());

        auto ref_batched_gemm = ReferenceBatchedGemmInstance{};
        auto ref_invoker      = ref_batched_gemm.MakeInvoker();

        Tensor<CDataType> c_g_m_n_host_result =
            HostTensorDescriptor(std::vector<std::size_t>({batch_count, M, N}),
                                 std::vector<std::size_t>({M * N, N, 1}));

        auto ref_argument = ref_batched_gemm.MakeArgument(
            a_g_m_k, b_g_k_n, c_g_m_n_host_result, a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);

        for(int g0 = 0; g0 < G0; g0++)
        {
            for(int g1 = 0; g1 < G1; g1++)
            {
                for(int m = 0; m < M; m++)
                {
                    for(int n = 0; n < N; n++)
                    {
                        int g = g0 * G1 + g1;

                        c_g0_g1_m_n_host_result(g0, g1, m, n) = c_g_m_n_host_result(g, m, n);
                    }
                }
            }
        }

        pass = ck::utils::check_err(c_g0_g1_m_n_host_result.mData,
                                    c_g0_g1_m_n_device_result.mData,
                                    "Error: Incorrect results c");
    }
    return pass ? 0 : 1;
}
```
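The verification loop above makes the permutation explicit: the kernel computes an ordinary batched GEMM C[g, m, n] with g = g0 * G1 + g1, but stores each element at the offset implied by the [G0, M, G1, N] strides, so no separate transpose pass is needed. A small self-contained sketch of that addressing (hypothetical helper, not a CK API):

```cpp
#include <cassert>

// Offset of element (g0, g1, m, n) in the permuted [G0, M, G1, N] output.
inline long permuted_offset(long g0, long g1, long m, long n,
                            long stride_G0, long stride_G1, long stride_M, long stride_N)
{
    return g0 * stride_G0 + g1 * stride_G1 + m * stride_M + n * stride_N;
}

int main()
{
    // Strides from the example: M = 88, N = 64, G1 = 10.
    const long M = 88, N = 64, G1 = 10;
    const long stride_G0 = M * G1 * N; // 56320
    const long stride_G1 = N;          // 64
    const long stride_M  = G1 * N;     // 640
    const long stride_N  = 1;

    // Memory order is [G0, M, G1, N]: stepping m moves farther than stepping g1.
    assert(permuted_offset(0, 1, 0, 0, stride_G0, stride_G1, stride_M, stride_N) == 64);
    assert(permuted_offset(0, 0, 1, 0, stride_G0, stride_G1, stride_M, stride_N) == 640);
    return 0;
}
```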
### example/25_gemm_bias_c_permute/CMakeLists.txt (new file, mode 100644)

```cmake
add_example_executable(example_gemm_bias_c_permute_xdl_fp16 gemm_bias_c_permute_xdl_fp16.cpp)
```
### example/25_gemm_bias_c_permute/gemm_bias_c_permute_xdl_fp16.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_bias_c_permute_xdl.hpp"
#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"

#include "ck/library/host_tensor/device_memory.hpp"
#include "ck/library/host_tensor/host_tensor.hpp"
#include "ck/library/host_tensor/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using Add         = ck::tensor_operation::element_wise::Add;

using ADataType        = F16;
using BDataType        = F16;
using AccDataType      = F32;
using CShuffleDataType = F32;
using DDataType        = F16;
using EDataType        = F16;

using ALayout = Row;
using BLayout = Col;
using DLayout = Row;
using ELayout = Row;

using AElementOp   = PassThrough;
using BElementOp   = PassThrough;
using CDEElementOp = Add;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

// clang-format off
using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmBiasCPermute_Xdl
//######| ALayout| BLayout| ELayout| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//######| | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//######| | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
    <ALayout, BLayout, ELayout, ADataType, BDataType, AccDataType, CShuffleDataType, DDataType, EDataType,
     AElementOp, BElementOp, CDEElementOp, GemmDefault,
     1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
     S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
     S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
     1, 1, S<1, 32, 1, 8>, 1>;
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    ck::index_t M0 = 4;
    ck::index_t M1 = 32;
    ck::index_t M2 = 128;
    ck::index_t N0 = 16;
    ck::index_t N1 = 256;

    // GEMM shape
    ck::index_t M = M0 * M1 * M2;
    ck::index_t N = N0 * N1;
    ck::index_t K = 128;

    ck::index_t stride_A = K;
    ck::index_t stride_B = K;

#if 1
    // E = [M0, N0, M1, N1, M2]
    ck::index_t stride_E_M0 = N0 * M1 * N1 * M2;
    ck::index_t stride_E_M1 = N1 * M2;
    ck::index_t stride_E_M2 = 1;
    ck::index_t stride_E_N0 = M1 * N1 * M2;
    ck::index_t stride_E_N1 = M2;

    // D = [0, N0, 0, N1, 0]
    ck::index_t stride_D_M0 = 0;
    ck::index_t stride_D_M1 = 0;
    ck::index_t stride_D_M2 = 0;
    ck::index_t stride_D_N0 = N1;
    ck::index_t stride_D_N1 = 1;
#else
    // D = [0, 0, 0, N0, N1]
    ck::index_t stride_D_M0 = 0;
    ck::index_t stride_D_M1 = 0;
    ck::index_t stride_D_M2 = 0;
    ck::index_t stride_D_N0 = N1;
    ck::index_t stride_D_N1 = 1;

    // E = [M0, M1, M2, N0, N1]
    ck::index_t stride_E_M0 = M1 * M2 * N0 * N1;
    ck::index_t stride_E_M1 = M2 * N0 * N1;
    ck::index_t stride_E_M2 = N0 * N1;
    ck::index_t stride_E_N0 = N1;
    ck::index_t stride_E_N1 = 1;
#endif

    const ck::tensor_operation::device::DEGridDesc_M0_M1_M2_N0_N1 d_grid_desc{
        M0, M1, M2, N0, N1, stride_D_M0, stride_D_M1, stride_D_M2, stride_D_N0, stride_D_N1};
    const ck::tensor_operation::device::DEGridDesc_M0_M1_M2_N0_N1 e_grid_desc{
        M0, M1, M2, N0, N1, stride_E_M0, stride_E_M1, stride_E_M2, stride_E_N0, stride_E_N1};

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    auto f_host_de_tensor_descriptor =
        [](ck::tensor_operation::device::DEGridDesc_M0_M1_M2_N0_N1 de_grid_desc) {
            std::size_t m0 = de_grid_desc.M0_;
            std::size_t m1 = de_grid_desc.M1_;
            std::size_t m2 = de_grid_desc.M2_;
            std::size_t n0 = de_grid_desc.N0_;
            std::size_t n1 = de_grid_desc.N1_;

            std::size_t stride_m0 = de_grid_desc.stride_M0_;
            std::size_t stride_m1 = de_grid_desc.stride_M1_;
            std::size_t stride_m2 = de_grid_desc.stride_M2_;
            std::size_t stride_n0 = de_grid_desc.stride_N0_;
            std::size_t stride_n1 = de_grid_desc.stride_N1_;

            return HostTensorDescriptor(
                std::vector<std::size_t>({m0, m1, m2, n0, n1}),
                std::vector<std::size_t>({stride_m0, stride_m1, stride_m2, stride_n0, stride_n1}));
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, stride_A, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, stride_B, BLayout{}));
    Tensor<DDataType> d_m0_m1_m2_n0_n1(f_host_de_tensor_descriptor(d_grid_desc));
    Tensor<EDataType> e_m0_m1_m2_n0_n1_host_result(f_host_de_tensor_descriptor(e_grid_desc));
    Tensor<EDataType> e_m0_m1_m2_n0_n1_device_result(f_host_de_tensor_descriptor(e_grid_desc));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "d_m0_m1_m2_n0_n1: " << d_m0_m1_m2_n0_n1.mDesc << std::endl;
    std::cout << "e_m0_m1_m2_n0_n1: " << e_m0_m1_m2_n0_n1_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        d_m0_m1_m2_n0_n1.GenerateTensorValue(GeneratorTensor_2<DDataType>{-5, 5});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        d_m0_m1_m2_n0_n1.GenerateTensorValue(GeneratorTensor_3<DDataType>{0.0, 1.0});
    }

    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
    DeviceMem d_m0_m1_m2_n0_n1_device_buf(sizeof(DDataType) *
                                          d_m0_m1_m2_n0_n1.mDesc.GetElementSpace());
    DeviceMem e_m0_m1_m2_n0_n1_device_buf(sizeof(EDataType) *
                                          e_m0_m1_m2_n0_n1_device_result.mDesc.GetElementSpace());

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
    d_m0_m1_m2_n0_n1_device_buf.ToDevice(d_m0_m1_m2_n0_n1.mData.data());

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{};

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument  = device_op.MakeArgument(a_m_k_device_buf.GetDeviceBuffer(),
                                            b_k_n_device_buf.GetDeviceBuffer(),
                                            d_m0_m1_m2_n0_n1_device_buf.GetDeviceBuffer(),
                                            e_m0_m1_m2_n0_n1_device_buf.GetDeviceBuffer(),
                                            M,
                                            N,
                                            K,
                                            stride_A,
                                            stride_B,
                                            d_grid_desc,
                                            e_grid_desc,
                                            a_element_op,
                                            b_element_op,
                                            cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error("wrong! this device_op instance does not support this problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop      = std::size_t(2) * M * N * K;
    std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
                            sizeof(DDataType) * N + sizeof(EDataType) * M * N;

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
              << " GB/s, " << device_op.GetTypeString() << std::endl;

    if(do_verification)
    {
        Tensor<AccDataType> c_m_n(HostTensorDescriptor(std::vector<std::size_t>{
            static_cast<std::size_t>(M), static_cast<std::size_t>(N)}));

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                AccDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                BElementOp,
                                                                                PassThrough>;

        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument =
            ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m0 = 0; m0 < M0; ++m0)
            for(int m1 = 0; m1 < M1; ++m1)
                for(int m2 = 0; m2 < M2; ++m2)
                    for(int n0 = 0; n0 < N0; ++n0)
                        for(int n1 = 0; n1 < N1; ++n1)
                        {
                            int m = m0 * M1 * M2 + m1 * M2 + m2;
                            int n = n0 * N1 + n1;

                            cde_element_op(e_m0_m1_m2_n0_n1_host_result(m0, m1, m2, n0, n1),
                                           ck::type_convert<EDataType>(c_m_n(m, n)),
                                           d_m0_m1_m2_n0_n1(m0, m1, m2, n0, n1));
                        }

        e_m0_m1_m2_n0_n1_device_buf.FromDevice(e_m0_m1_m2_n0_n1_device_result.mData.data());

        return ck::utils::check_err(e_m0_m1_m2_n0_n1_device_result.mData,
                                    e_m0_m1_m2_n0_n1_host_result.mData)
                   ? 0
                   : 1;
    }

    return 0;
}
```
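Note how the `#if 1` branch gives D zero strides on all three M dimensions: with a stride-0 axis, every (m0, m1, m2) index resolves to the same element, so a 1xN bias is broadcast across M without materializing an MxN tensor. A minimal sketch of the idea (hypothetical helper, not CK code):

```cpp
// Element lookup through explicit strides; a stride of 0 broadcasts that axis.
inline float strided_at(const float* data, int m, int n, int stride_m, int stride_n)
{
    return data[m * stride_m + n * stride_n];
}

// With stride_m = 0, the same N-length bias row serves every m:
//   strided_at(bias, 0, 5, /*stride_m=*/0, /*stride_n=*/1)
// == strided_at(bias, 7, 5, /*stride_m=*/0, /*stride_n=*/1)
```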
### example/26_contraction/CMakeLists.txt (new file, mode 100644)

```cmake
add_example_executable(example_contraction_bilinear_xdl_fp32 contraction_bilinear_xdl_fp32.cpp)
add_example_executable(example_contraction_scale_xdl_fp32 contraction_scale_xdl_fp32.cpp)
```
### example/26_contraction/README.md (new file, mode 100644)

````markdown
# Instructions for ```example_contraction_bilinear_xdl_fp32```

## Run
```bash
#arg1: verification (0=no, 1=yes)
#arg2: initialization (0=no init, 1=integer value, 2=decimal value)
#arg3: time kernel (0=no, 1=yes)
./bin/example_contraction_bilinear_xdl_fp32 1 1 1
```

Result (MI100 @ dynamic freq, 46 TFlops peak FP32)
```
a_ms_ks: dim 4, lengths {30, 128, 32, 64}, strides {524288, 4096, 128, 1}
b_ks_ns: dim 4, lengths {32, 64, 32, 64}, strides {128, 1, 524288, 4096}
c_ms_ns: dim 4, lengths {30, 128, 32, 64}, strides {524288, 4096, 128, 1}
launch_and_time_kernel: grid_dim {240, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
Perf: 0.843286 ms, 38.1985 TFlops, 94.5014 GB/s, DeviceContractionMultipleD_Xdl_CShuffle<256, 256, 128, 16, 4, 4>
```
````
### example/26_contraction/contraction_bilinear_xdl_fp32.cpp (new file, +444 lines)

(Diff collapsed on the original page; contents not shown.)
### example/26_contraction/contraction_scale_xdl_fp32.cpp (new file, +424 lines)

(Diff collapsed on the original page; contents not shown.)
### example/CMakeLists.txt

```diff
@@ -22,7 +22,7 @@ function(add_example_executable_no_testing EXAMPLE_NAME FILE_NAME)
 endfunction(add_example_executable_no_testing EXAMPLE_NAME)

 add_subdirectory(01_gemm)
-add_subdirectory(02_gemm_alpha_beta)
+add_subdirectory(02_gemm_bilinear)
 add_subdirectory(03_gemm_bias_relu)
 add_subdirectory(04_gemm_add_add_fastgelu)
 add_subdirectory(06_conv2d_fwd_bias_relu)
@@ -42,3 +42,6 @@ add_subdirectory(20_convnd_bwd_weight_xdl)
 add_subdirectory(21_gemm_layernorm)
 add_subdirectory(22_cgemm)
 add_subdirectory(23_softmax)
+add_subdirectory(24_batched_gemm_c_permute)
+add_subdirectory(25_gemm_bias_c_permute)
+add_subdirectory(26_contraction)
```
### include/ck/ck.hpp

```diff
@@ -102,7 +102,12 @@
 #define CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR 0

 // experimental feature: buffer load/store/atomic-add/ OOB trick
+// This (ifndef) is a hack to use customized behavior for buffer load rather than using default
+// setting. Don't use this hack unless absolutely necessary!
+// FIXME: make the behavior of buffer load a configurable (template) parameter for each usage
+#ifndef CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK
 #define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 0
+#endif
 #define CK_EXPERIMENTAL_USE_BUFFER_STORE_OOB_CHECK_OFFSET_TRICK 1
 #define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_ADD_OOB_CHECK_OFFSET_TRICK 1
 #define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_MAX_OOB_CHECK_OFFSET_TRICK 1
@@ -167,16 +172,6 @@ struct InMemoryDataOperationEnumSequence
     }
 };

-#if 0
-// TODO: no longer needed, remove this
-enum struct ActivTypeEnum
-{
-    None,
-    LeakyRelu,
-    Sigmoid
-};
-#endif
-
 // index type
 using index_t      = int32_t;
 using long_index_t = int64_t;
```
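With the new `#ifndef` guard, the buffer-load OOB behavior can be overridden per build (for instance via a `-D` compiler flag) instead of editing ck.hpp; the other three OOB macros remain hard-coded. A minimal self-contained illustration of the preprocessor pattern, with the pre-definition standing in for a user's compiler flag:

```cpp
// A translation unit (or -DCK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK=1
// on the compiler command line) can now pre-define the macro before ck.hpp is seen.
#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 1 // user override

// The guarded default from ck.hpp only applies when nothing else defined the macro:
#ifndef CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK
#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 0
#endif

static_assert(CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK == 1,
              "the pre-definition wins over the guarded default");
```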
### include/ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp

```diff
@@ -18,7 +18,7 @@ enum struct ConvolutionForwardSpecialization
     OddC,
 };

-inline std::string getConvFwdSpecializationStr(const ConvolutionForwardSpecialization& s)
+inline std::string getConvForwardSpecializationString(const ConvolutionForwardSpecialization& s)
 {
     switch(s)
     {
```
### include/ck/tensor_operation/gpu/device/device_batched_gemm.hpp

```diff
@@ -12,33 +12,56 @@ namespace ck {
 namespace tensor_operation {
 namespace device {

-template <typename AElementwiseOperation,
+template <typename ALayout,
+          typename BLayout,
+          typename CLayout,
+          typename ADataType,
+          typename BDataType,
+          typename CDataType,
+          typename AElementwiseOperation,
           typename BElementwiseOperation,
           typename CElementwiseOperation>
 struct DeviceBatchedGemm : public BaseOperator
 {
     virtual std::unique_ptr<BaseArgument> MakeArgumentPointer(const void* p_a,
                                                               const void* p_b,
                                                               void* p_c,
                                                               ck::index_t M,
                                                               ck::index_t N,
                                                               ck::index_t K,
                                                               ck::index_t StrideA,
                                                               ck::index_t StrideB,
                                                               ck::index_t StrideC,
-                                                              AElementwiseOperation a_element_op,
-                                                              BElementwiseOperation b_element_op,
-                                                              CElementwiseOperation c_element_op,
-                                                              ck::index_t Batch) = 0;
+                                                              ck::index_t BatchStrideA,
+                                                              ck::index_t BatchStrideB,
+                                                              ck::index_t BatchStrideC,
+                                                              ck::index_t Batch,
+                                                              AElementwiseOperation a_element_op,
+                                                              BElementwiseOperation b_element_op,
+                                                              CElementwiseOperation c_element_op) = 0;

     virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
 };

-template <typename AElementwiseOperation,
+template <typename ALayout,
+          typename BLayout,
+          typename CLayout,
+          typename ADataType,
+          typename BDataType,
+          typename CDataType,
+          typename AElementwiseOperation,
           typename BElementwiseOperation,
           typename CElementwiseOperation>
 using DeviceBatchedGemmPtr = std::unique_ptr<
-    DeviceBatchedGemm<AElementwiseOperation, BElementwiseOperation, CElementwiseOperation>>;
+    DeviceBatchedGemm<ALayout,
+                      BLayout,
+                      CLayout,
+                      ADataType,
+                      BDataType,
+                      CDataType,
+                      AElementwiseOperation,
+                      BElementwiseOperation,
+                      CElementwiseOperation>>;

 } // namespace device
 } // namespace tensor_operation
```
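The reworked interface takes explicit per-batch pointer strides instead of deriving them from the grid descriptors (compare the `compute_ptr_offset_of_batch_` change in device_batched_gemm_xdl.hpp below). For densely packed batches the values are simply the per-matrix element counts; a hedged sketch with a hypothetical helper name:

```cpp
#include <cstdint>

// For densely packed batches, BatchStrideA/B/C are the per-matrix element counts.
// This matches the offsets the old code derived from descriptor element-space sizes.
struct BatchStrides
{
    std::int64_t a, b, c;
};

inline BatchStrides packed_batch_strides(std::int64_t M, std::int64_t N, std::int64_t K)
{
    return {M * K, K * N, M * N}; // A is MxK, B is KxN, C is MxN per batch
}
```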
### include/ck/tensor_operation/gpu/device/device_batched_gemm_c_permute.hpp (new file, mode 100644)

```cpp
#pragma once
#include <iostream>
#include <vector>

#include "device_base.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

struct BatchedGemmCPermuteDesc
{
    ck::index_t G0_, G1_, M_, N_;
    ck::index_t stride_G0_, stride_G1_, stride_M_, stride_N_;
};

template <typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation>
struct DeviceBatchedGemmCPermute : public BaseOperator
{
    virtual std::unique_ptr<BaseArgument>
    MakeArgumentPointer(const void* p_a,
                        const void* p_b,
                        void* p_c,
                        index_t M,
                        index_t N,
                        index_t K,
                        index_t stride_A,
                        index_t stride_B,
                        BatchedGemmCPermuteDesc batched_gemm_c_permute_desc,
                        AElementwiseOperation a_element_op,
                        BElementwiseOperation b_element_op,
                        CElementwiseOperation c_element_op,
                        ck::index_t BatchCount) = 0;

    virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};

template <typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation>
using DeviceBatchedGemmCPermutePtr =
    std::unique_ptr<DeviceBatchedGemmCPermute<AElementwiseOperation,
                                              BElementwiseOperation,
                                              CElementwiseOperation>>;

} // namespace device
} // namespace tensor_operation
} // namespace ck
```
### include/ck/tensor_operation/gpu/device/device_batched_gemm_c_permute_xdl.hpp (new file, +860 lines)

(Diff collapsed on the original page; contents not shown.)
### include/ck/tensor_operation/gpu/device/device_batched_gemm_xdl.hpp

```diff
@@ -113,7 +113,7 @@ __global__ void
     ignore = c_element_op;
     ignore = compute_ptr_offset_of_batch;
     ignore = block_2_ctile_map;
-#endif
+#endif // end of if (defined(__gfx908__) || defined(__gfx90a__))
 }

@@ -151,8 +151,15 @@ template <typename ADataType,
           bool BBlockLdsAddExtraN,
           ck::index_t CThreadTransferSrcDstVectorDim,
           ck::index_t CThreadTransferDstScalarPerVector>
-struct DeviceBatchedGemmXdl
-    : public DeviceBatchedGemm<AElementwiseOperation, BElementwiseOperation, CElementwiseOperation>
+struct DeviceBatchedGemmXdl : public DeviceBatchedGemm<ALayout,
+                                                       BLayout,
+                                                       CLayout,
+                                                       ADataType,
+                                                       BDataType,
+                                                       CDataType,
+                                                       AElementwiseOperation,
+                                                       BElementwiseOperation,
+                                                       CElementwiseOperation>
 {
     static constexpr auto I0 = Number<0>{};
     static constexpr auto I1 = Number<1>{};
@@ -334,12 +341,15 @@ struct DeviceBatchedGemmXdl
              index_t StrideA,
              index_t StrideB,
              index_t StrideC,
+             index_t BatchStrideA,
+             index_t BatchStrideB,
+             index_t BatchStrideC,
+             index_t Batch,
              index_t M01,
              index_t N01,
              AElementwiseOperation a_element_op,
              BElementwiseOperation b_element_op,
-             CElementwiseOperation c_element_op,
-             index_t Batch)
+             CElementwiseOperation c_element_op)
         : p_a_grid_{p_a_grid},
           p_b_grid_{p_b_grid},
           p_c_grid_{p_c_grid},
@@ -350,10 +360,7 @@ struct DeviceBatchedGemmXdl
           DeviceBatchedGemmXdl::MakeBGridDescriptor_K0_N_K1(K, N, StrideB)},
           c_grid_desc_m_n_{DeviceBatchedGemmXdl::MakeCGridDescriptor_M_N(M, N, StrideC)},
           c_grid_desc_m0_n0_m1_n1_m2_m3_m4_n2_{},
-          compute_ptr_offset_of_batch_{
-              type_convert<index_t>(a_grid_desc_k0_m_k1_.GetElementSpaceSize()),
-              type_convert<index_t>(b_grid_desc_k0_n_k1_.GetElementSpaceSize()),
-              type_convert<index_t>(c_grid_desc_m_n_.GetElementSpaceSize())},
+          compute_ptr_offset_of_batch_{BatchStrideA, BatchStrideB, BatchStrideC},
           block_2_ctile_map_{GridwiseGemm::MakeDefaultBlock2CTileMap(c_grid_desc_m_n_, M01, N01)},
           M01_{M01},
@@ -536,10 +543,13 @@ struct DeviceBatchedGemmXdl
              index_t StrideA,
              index_t StrideB,
              index_t StrideC,
+             index_t BatchStrideA,
+             index_t BatchStrideB,
+             index_t BatchStrideC,
+             index_t Batch,
              AElementwiseOperation a_element_op,
              BElementwiseOperation b_element_op,
-             CElementwiseOperation c_element_op,
-             index_t Batch)
+             CElementwiseOperation c_element_op)
     {
         return Argument{p_a,
                         p_b,
@@ -550,12 +560,15 @@ struct DeviceBatchedGemmXdl
                         StrideA,
                         StrideB,
                         StrideC,
+                        BatchStrideA,
+                        BatchStrideB,
+                        BatchStrideC,
+                        Batch,
                         1,
                         1,
                         a_element_op,
                         b_element_op,
-                        c_element_op,
-                        Batch};
+                        c_element_op};
     }

     static auto MakeInvoker() { return Invoker{}; }
@@ -570,10 +583,13 @@ struct DeviceBatchedGemmXdl
              index_t StrideA,
              index_t StrideB,
              index_t StrideC,
+             index_t BatchStrideA,
+             index_t BatchStrideB,
+             index_t BatchStrideC,
+             index_t Batch,
              AElementwiseOperation a_element_op,
              BElementwiseOperation b_element_op,
-             CElementwiseOperation c_element_op,
-             index_t Batch) override
+             CElementwiseOperation c_element_op) override
     {
         return std::make_unique<Argument>(static_cast<const ADataType*>(p_a),
                                           static_cast<const BDataType*>(p_b),
@@ -584,12 +600,15 @@ struct DeviceBatchedGemmXdl
                                           StrideA,
                                           StrideB,
                                           StrideC,
+                                          BatchStrideA,
+                                          BatchStrideB,
+                                          BatchStrideC,
+                                          Batch,
                                           1,
                                           1,
                                           a_element_op,
                                           b_element_op,
-                                          c_element_op,
-                                          Batch);
+                                          c_element_op);
     }

     // polymorphic
```