Project: gaoqiong / composable_kernel

Commit 57b7dca7
Authored May 29, 2022 by Anthony Chang

    layernorm example code & host verification

Parent: 83fde45b
Showing 2 changed files with 168 additions and 40 deletions:

example/21_gemm_layernorm/gemm_xdl_layernorm_single_kernel_fp16.cpp  (+134, -40)
library/include/ck/library/host_tensor/host_tensor.hpp               (+34, -0)
example/21_gemm_layernorm/gemm_xdl_layernorm_single_kernel_fp16.cpp @ 57b7dca7
@@ -10,9 +10,10 @@
 #include "host_tensor.hpp"
 #include "host_tensor_generator.hpp"
 #include "device_tensor.hpp"
-#include "device_gemm_xdl.hpp"
-#include "device_gemm_xdl_cshuffle.hpp"
+#include "tensor_layout.hpp"
+#include "device_gemm_xdl_layernorm_cshuffle.hpp"
 #include "element_wise_operation.hpp"
 #include "reduction_operator.hpp"
 #include "reference_gemm.hpp"
 #include "gemm_specialization.hpp"
@@ -25,12 +26,10 @@ using F32 = float;
 using Row = ck::tensor_layout::gemm::RowMajor;
 using Col = ck::tensor_layout::gemm::ColumnMajor;

 using PassThrough = ck::tensor_operation::element_wise::PassThrough;

-using ADataType   = ck::half_t;
-using BDataType   = ck::half_t;
-using CDataType   = ck::half_t;
-using AccDataType = float;
+using ADataType   = F16;
+using BDataType   = F16;
+using CDataType   = F16;
+using AccDataType = F32;

 using ALayout = ck::tensor_layout::gemm::RowMajor;
 using BLayout = ck::tensor_layout::gemm::ColumnMajor;
@@ -43,16 +42,73 @@ using CElementOp = ck::tensor_operation::element_wise::PassThrough;
 static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

 // clang-format off
-// DeviceGemm_Xdl_CShuffle< ALayout, BLayout, CLayout, AData, BData, CData, AccData, CShuffle DataType,
-//                          A/B/C Elementwise Operation, GEMM Specialization, NumGemmK Prefetch Stage,
-//                          Block Size, M/N/KPerBlock, AK1, BK1, M/NPerXDL, M/NXdlPerWave,
-//                          A/BBlockTransfer{ThreadClusterLengths_K0_M_K1, ThreadClusterArrangeOrder, SrcAccessOrder,
-//                          SrcVectorDim, SrcScalarPerVector, DstScalarPerVector_K1, LdsAddExtraM/N},
-//                          CShuffle M/NXdlPerWavePerShuffle, CBlockTransferClusterLengths, CBlockTransfer ScalarPerVector>
-using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle<
-    Row, Col, Row, F16, F16, F16, F32, F32,
-    AElementOp, BElementOp, CElementOp, GemmDefault,
-    1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
-    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
-    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
-    1, 1, S<1, 32, 1, 8>, 8>;
+// DeviceGemmLayerNorm_Xdl_CShuffle< ALayout, BLayout, CLayout, AData, BData, CData,
+//                                   GemmAcc/CShuffle/ReduceAcc DataType,
+//                                   A/B/C Elementwise Operation, GEMM Specialization, NumGemmK Prefetch Stage,
+//                                   Block Size, M/N/KPerBlock, AK1, BK1, M/NPerXDL, M/NXdlPerWave,
+//                                   A/BBlockTransfer{ThreadClusterLengths_K0_M_K1, ThreadClusterArrangeOrder, SrcAccessOrder,
+//                                   SrcVectorDim, SrcScalarPerVector, DstScalarPerVector_K1, LdsAddExtraM/N},
+//                                   CShuffle M/NXdlPerWavePerShuffle, CBlockTransferClusterLengths, CBlockTransfer ScalarPerVector,
+//                                   CReduce ThreadClusterLengths_MPerBlock_NPerBlock,
+//                                   CReduceThreadLds2VGprCopy SrcDstScalarPerVector_NPerBlock,
+//                                   CReduceThreadVgpr2GlobalCopy SrcDstScalarPerVector_MPerBlock>
+using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmLayerNorm_Xdl_CShuffle<
+    Row, Col, Row, F16, F16, F16, AccDataType, AccDataType, AccDataType,
+    AElementOp, BElementOp, CElementOp, GemmDefault,
+    1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
+    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
+    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
+    1, 2, S<1, 32, 1, 8>, 8, S<64, 4>, 4, 1>;
 // clang-format on
+// D = Layernorm(acc + broadcast(bias)) * broadcast(gamma) + broadcast(beta)
+template <typename InDataType, typename OutDataType>
+void Layernorm(Tensor<OutDataType>& result,
+               const Tensor<InDataType>& acc,   // MxN
+               const Tensor<InDataType>& bias,  // 1xN
+               const Tensor<InDataType>& gamma, // 1xN
+               const Tensor<InDataType>& beta,  // 1xN
+               const InDataType epsilon = 1e-5)
+{
+    assert(acc.mDesc.GetLengths()[1] == bias.mDesc.GetLengths()[0] &&
+           acc.mDesc.GetLengths()[1] == gamma.mDesc.GetLengths()[0] &&
+           acc.mDesc.GetLengths()[1] == beta.mDesc.GetLengths()[0]);
+
+    size_t M = acc.mDesc.GetLengths()[0];
+    size_t N = acc.mDesc.GetLengths()[1];
+
+    Tensor<InDataType> avg_acc_sq(HostTensorDescriptor(std::vector<size_t>({M})));
+    Tensor<InDataType> avg_acc(HostTensorDescriptor(std::vector<size_t>({M})));
+    Tensor<InDataType> acc_layernorm(acc.mDesc);
+
+    // add bias
+    acc_layernorm.ForEach(
+        [&](auto& self, auto idx) { self(idx[0], idx[1]) = acc(idx[0], idx[1]) + bias(idx[1]); });
+
+    // reduce N dim
+    for(size_t i = 0; i < M; i++)
+    {
+        InDataType sum_acc_sq = 0;
+        InDataType sum_acc    = 0;
+        for(size_t j = 0; j < N; j++)
+        {
+            sum_acc_sq += acc_layernorm(i, j) * acc_layernorm(i, j);
+            sum_acc += acc_layernorm(i, j);
+        }
+        avg_acc_sq(i) = sum_acc_sq / N;
+        avg_acc(i)    = sum_acc / N;
+        // std::cout << "avg_acc_(" << i << ") =" << avg_acc(i) << std::endl;
+        // std::cout << "avg_acc_sq_(" << i << ") =" << avg_acc_sq(i) << std::endl;
+    }
+
+    // normalize
+    acc_layernorm.ForEach([&](auto& self, auto idx) {
+        self(idx[0], idx[1]) =
+            (self(idx[0], idx[1]) - avg_acc(idx[0])) /
+            sqrt(avg_acc_sq(idx[0]) - avg_acc(idx[0]) * avg_acc(idx[0]) + epsilon);
+    });
+
+    // affine
+    acc_layernorm.ForEach([&](auto& self, auto idx) {
+        self(idx[0], idx[1]) = self(idx[0], idx[1]) * gamma(idx[1]) + beta(idx[1]);
+    });
+
+    // cast
+    result = acc_layernorm.template CopyAsType<OutDataType>();
+}
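Restated in math form (a direct transcription of the host reference above, which uses the single-pass biased variance E[x^2] - E[x]^2 rather than a two-pass variance), with x_{ij} = acc_{ij} + bias_j:

\mu_i = \frac{1}{N}\sum_{j=1}^{N} x_{ij}, \qquad
m_i = \frac{1}{N}\sum_{j=1}^{N} x_{ij}^{2}, \qquad
D_{ij} = \gamma_j \cdot \frac{x_{ij} - \mu_i}{\sqrt{m_i - \mu_i^{2} + \epsilon}} + \beta_j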
 using ReferenceGemmInstance = ck::tensor_operation::host::
-    ReferenceGemm<ADataType, BDataType, CDataType, AElementOp, BElementOp, CElementOp>;
+    ReferenceGemm<ADataType, BDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
 int main(int argc, char* argv[])
 {
@@ -62,14 +118,18 @@ int main(int argc, char* argv[])
     // GEMM shape
     ck::index_t M = 3840;
-    ck::index_t N = 4096;
+    ck::index_t N = 128;
     ck::index_t K = 4096;

     ck::index_t StrideA = 4096;
     ck::index_t StrideB = 4096;
-    ck::index_t StrideC = 4096;
+    ck::index_t StrideC = 128;
-    if(argc == 4)
+    if(argc == 1)
+    {
+        // do nothing
+    }
+    else if(argc == 4)
     {
         do_verification = std::stoi(argv[1]);
         init_method     = std::stoi(argv[2]);
@@ -116,10 +176,17 @@ int main(int argc, char* argv[])
     Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
     Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
     Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
+    Tensor<AccDataType> acc_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
+    Tensor<AccDataType> c0_n_bias(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
+    Tensor<AccDataType> c0_n_gamma(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));
+    Tensor<AccDataType> c0_n_beta(HostTensorDescriptor(std::vector<size_t>({size_t(N)})));

     std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
     std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
     std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
+    std::cout << "c0_n_bias: " << c0_n_bias.mDesc << std::endl;
+    std::cout << "c0_n_gamma: " << c0_n_gamma.mDesc << std::endl;
+    std::cout << "c0_n_beta: " << c0_n_beta.mDesc << std::endl;

     switch(init_method)
     {
@@ -137,12 +204,25 @@ int main(int argc, char* argv[])
         b_k_n.GenerateTensorValue(GeneratorTensor_Sequential<1>{});
     }

-    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
-    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
-    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());
-
-    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
-    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
+    // TODO ANT: test other init
+    c_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<CDataType>{0});
+    acc_m_n_host_result.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0});
+    c0_n_bias.GenerateTensorValue(GeneratorTensor_Sequential<0>{});
+    c0_n_gamma.GenerateTensorValue(GeneratorTensor_1<AccDataType>{2});
+    c0_n_beta.GenerateTensorValue(GeneratorTensor_1<AccDataType>{2});
+
+    DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
+    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
+    DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());
+    DeviceMem c0_bias_buf(sizeof(AccDataType) * c0_n_bias.mDesc.GetElementSpace());
+    DeviceMem c0_gamma_buf(sizeof(AccDataType) * c0_n_gamma.mDesc.GetElementSpace());
+    DeviceMem c0_beta_buf(sizeof(AccDataType) * c0_n_beta.mDesc.GetElementSpace());
+
+    a_device_buf.ToDevice(a_m_k.mData.data());
+    b_device_buf.ToDevice(b_k_n.mData.data());
+    c0_bias_buf.ToDevice(c0_n_bias.mData.data());
+    c0_gamma_buf.ToDevice(c0_n_gamma.mData.data());
+    c0_beta_buf.ToDevice(c0_n_beta.mData.data());

     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
@@ -151,9 +231,12 @@ int main(int argc, char* argv[])
     // do GEMM
     auto gemm     = DeviceGemmInstance{};
     auto invoker  = gemm.MakeInvoker();
-    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
-                                      static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
-                                      static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
+    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
+                                      static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
+                                      static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
+                                      static_cast<AccDataType*>(c0_bias_buf.GetDeviceBuffer()),
+                                      static_cast<AccDataType*>(c0_gamma_buf.GetDeviceBuffer()),
+                                      static_cast<AccDataType*>(c0_beta_buf.GetDeviceBuffer()),
                                       M,
                                       N,
                                       K,
@@ -184,20 +267,31 @@ int main(int argc, char* argv[])
     std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
               << " GB/s, " << gemm.GetTypeString() << std::endl;

-    c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
+    bool pass = true;

     if(do_verification)
     {
+        c_device_buf.FromDevice(c_m_n_device_result.mData.data());
+
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();

         auto ref_argument = ref_gemm.MakeArgument(
-            a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
+            a_m_k, b_k_n, acc_m_n_host_result, a_element_op, b_element_op, c_element_op);

         ref_invoker.Run(ref_argument);

-        return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData) ? 0 : 1;
-    }
+        Layernorm(c_m_n_host_result, acc_m_n_host_result, c0_n_bias, c0_n_gamma, c0_n_beta);

-    return 0;
+        pass &= ck::utils::check_err(
+            c_m_n_device_result.mData, c_m_n_host_result.mData, "Error: Incorrect results c");
+
+        // if (!pass)
+        // {
+        //     LogRangeAsType<float>(std::cout << "c_host: ", c_m_n_host_result.mData, ",")
+        //         << std::endl;
+        //     LogRangeAsType<float>(std::cout << "c_device: ", c_m_n_device_result.mData, ",")
+        //         << std::endl;
+        // }
+    }
+
+    return pass ? 0 : 1;
 }
library/include/ck/library/host_tensor/host_tensor.hpp @ 57b7dca7
@@ -212,6 +212,40 @@ struct Tensor
     Tensor(const HostTensorDescriptor& desc) : mDesc(desc), mData(mDesc.GetElementSpace()) {}

+    template <typename OutT>
+    Tensor<OutT> CopyAsType()
+    {
+        Tensor<OutT> ret(mDesc);
+        for(size_t i = 0; i < mData.size(); i++)
+        {
+            ret.mData[i] = static_cast<OutT>(mData[i]);
+        }
+        return ret;
+    }
+
+    template <typename F>
+    void ForEach_impl(F&& f, std::vector<size_t>& idx, size_t rank)
+    {
+        if(rank == mDesc.GetNumOfDimension())
+        {
+            f(*this, idx);
+            return;
+        }
+        // else
+        for(size_t i = 0; i < mDesc.GetLengths()[rank]; i++)
+        {
+            idx[rank] = i;
+            ForEach_impl(std::forward<F>(f), idx, rank + 1);
+        }
+    }
+
+    template <typename F>
+    void ForEach(F&& f)
+    {
+        std::vector<size_t> idx(mDesc.GetNumOfDimension(), 0);
+        ForEach_impl(std::forward<F>(f), idx, size_t(0));
+    }
+
     template <typename G>
     void GenerateTensorValue(G g, std::size_t num_thread = 1)
     {
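The ForEach added above recursively walks every index of the tensor and passes the tensor plus the current index vector to the callback; CopyAsType performs an element-wise static_cast into a tensor of another value type (this is what the Layernorm reference uses to cast its float accumulator to the output type). A minimal host-side usage sketch, assuming only the Tensor and HostTensorDescriptor API shown in this diff; the 2x3 shape and the float-to-double cast are illustrative, not part of the commit:

#include <vector>
#include "host_tensor.hpp"

void tensor_utils_example()
{
    // A small 2x3 host tensor of floats.
    Tensor<float> t(HostTensorDescriptor(std::vector<std::size_t>({2, 3})));

    // Visit every (i, j) index and fill t(i, j) = 10 * i + j.
    t.ForEach([&](auto& self, auto idx) { self(idx[0], idx[1]) = 10.f * idx[0] + idx[1]; });

    // Element-wise cast the whole tensor to another value type.
    Tensor<double> t_f64 = t.CopyAsType<double>();
}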