Commit 52d31393 authored by letaoqin's avatar letaoqin
Browse files

finish gelu, relu and silu

parent 9857ca19
set(GEMM_BIAS_ADD_SOURCES set(GEMM_BIAS_ADD_SOURCES
gemm_bias_add_xdl_fp16.cpp
gemm_bias_add_fp16.cpp gemm_bias_add_fp16.cpp
gemm_bias_add_xdl_fp16.cpp
) )
add_executable(example_gemm_bias_add_xdl_fp16 ${GEMM_BIAS_ADD_SOURCES}) add_executable(example_gemm_bias_add_xdl_fp16 ${GEMM_BIAS_ADD_SOURCES})
target_link_libraries(example_gemm_bias_add_xdl_fp16 PRIVATE utility) target_link_libraries(example_gemm_bias_add_xdl_fp16 PRIVATE utility)
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
#include "ck/stream_config.hpp" #include "ck/stream_config.hpp"
#include "ck/utility/data_type.hpp" #include "ck/utility/data_type.hpp"
#include "ck/utility/type_convert.hpp" #include "ck/utility/type_convert.hpp"
#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
namespace ck { namespace ck {
namespace impl { namespace impl {
...@@ -63,6 +65,12 @@ struct AddActivation ...@@ -63,6 +65,12 @@ struct AddActivation
} // namespace impl } // namespace impl
} // namespace ck } // namespace ck
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using Gelu = ck::tensor_operation::element_wise::Gelu;
using Relu = ck::tensor_operation::element_wise::Relu;
using Silu = ck::tensor_operation::element_wise::Silu;
using Sigmoid = ck::tensor_operation::element_wise::Sigmoid;
enum class ActivationType enum class ActivationType
{ {
Gelu = 0, Gelu = 0,
...@@ -70,6 +78,7 @@ enum class ActivationType ...@@ -70,6 +78,7 @@ enum class ActivationType
Silu, Silu,
Swiglu, Swiglu,
Geglu, Geglu,
Sigmoid,
Identity, Identity,
GeluNoneApproximate, GeluNoneApproximate,
GeGluNoneApproximate, GeGluNoneApproximate,
...@@ -86,4 +95,6 @@ struct GemmBiasAddArgs ...@@ -86,4 +95,6 @@ struct GemmBiasAddArgs
ck::index_t K; ck::index_t K;
}; };
float gemm_bias_add_fp16(const GemmBiasAddArgs& args, const StreamConfig& config); float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
const StreamConfig& config,
ActivationType op_type);
...@@ -23,16 +23,10 @@ using CShuffleDataType = F32; ...@@ -23,16 +23,10 @@ using CShuffleDataType = F32;
using ALayout = Row; using ALayout = Row;
using BLayout = Row; using BLayout = Row;
using D0Layout = Row; using D0Layout = Row;
using DsLayout = ck::Tuple<D0Layout>;
using CLayout = Row; using CLayout = Row;
using PassThrough = ck::tensor_operation::element_wise::PassThrough; using AElementOp = PassThrough;
using Relu = ck::tensor_operation::element_wise::Relu; using BElementOp = PassThrough;
using AElementOp = PassThrough;
using BElementOp = PassThrough;
using CDEElementOp = ck::impl::AddActivation<Relu>;
;
template <ck::index_t... Is> template <ck::index_t... Is>
using S = ck::Sequence<Is...>; using S = ck::Sequence<Is...>;
...@@ -40,7 +34,7 @@ using S = ck::Sequence<Is...>; ...@@ -40,7 +34,7 @@ using S = ck::Sequence<Is...>;
static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding; static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
// clang-format off // clang-format off
template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType> template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3< using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType, ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
DsDataType, CDataType, AccDataType, CShuffleDataType, DsDataType, CDataType, AccDataType, CShuffleDataType,
...@@ -57,7 +51,7 @@ using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMul ...@@ -57,7 +51,7 @@ using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMul
1, 1, 1, 1,
S<1, 16, 1, 4>, S<4, 4>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>; S<1, 16, 1, 4>, S<4, 4>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType> template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3< using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType, ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
DsDataType, CDataType, AccDataType, CShuffleDataType, DsDataType, CDataType, AccDataType, CShuffleDataType,
...@@ -75,8 +69,8 @@ using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_ ...@@ -75,8 +69,8 @@ using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_
S<1, 16, 1, 4>, S<2, 2>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>; S<1, 16, 1, 4>, S<2, 2>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
// clang-format on // clang-format on
template <typename DsLayout, typename CDEElementOp>
float gemm_bias_add_fp16(const GemmBiasAddArgs& args, const StreamConfig& config) float run_impl(const GemmBiasAddArgs& args, const StreamConfig& config)
{ {
using ADataType = ck::half_t; using ADataType = ck::half_t;
using BDataType = ck::half_t; using BDataType = ck::half_t;
...@@ -133,12 +127,46 @@ float gemm_bias_add_fp16(const GemmBiasAddArgs& args, const StreamConfig& config ...@@ -133,12 +127,46 @@ float gemm_bias_add_fp16(const GemmBiasAddArgs& args, const StreamConfig& config
return true; return true;
}; };
auto gemm = DeviceOpInstance_64_16_16_64<ADataType, BDataType, DsDataType, CDataType>{}; auto gemm = DeviceOpInstance_64_16_16_64<DsLayout,
ADataType,
BDataType,
DsDataType,
CDataType,
CDEElementOp>{};
if(!Run(gemm)) if(!Run(gemm))
{ {
auto gemm_def = DeviceOpInstance_default<ADataType, BDataType, DsDataType, CDataType>{}; auto gemm_def = DeviceOpInstance_default<DsLayout,
ADataType,
BDataType,
DsDataType,
CDataType,
CDEElementOp>{};
Run(gemm_def); Run(gemm_def);
} }
return ave_time; return ave_time;
} }
float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
const StreamConfig& config,
ActivationType op_type)
{
using DsLayout = ck::Tuple<D0Layout>;
switch(op_type)
{
case ActivationType::Gelu:
case ActivationType::Geglu:
case ActivationType::GeluNoneApproximate:
case ActivationType::GeGluNoneApproximate:
return run_impl<DsLayout, ck::impl::AddActivation<Gelu>>(args, config);
case ActivationType::Relu:
return run_impl<DsLayout, ck::impl::AddActivation<Relu>>(args, config);
case ActivationType::Silu:
case ActivationType::Swiglu:
return run_impl<DsLayout, ck::impl::AddActivation<Silu>>(args, config);
case ActivationType::Sigmoid:
return run_impl<DsLayout, ck::impl::AddActivation<Sigmoid>>(args, config);
case ActivationType::Identity:
case ActivationType::InvalidType:
default: return 0;
}
}
...@@ -13,8 +13,6 @@ ...@@ -13,8 +13,6 @@
#include "ck/library/utility/host_tensor_generator.hpp" #include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp" #include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp" #include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp" #include "ck/library/utility/check_err.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp" #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
...@@ -39,12 +37,14 @@ using D0Layout = Row; ...@@ -39,12 +37,14 @@ using D0Layout = Row;
using DsLayout = ck::Tuple<D0Layout>; using DsLayout = ck::Tuple<D0Layout>;
using ELayout = Row; using ELayout = Row;
using PassThrough = ck::tensor_operation::element_wise::PassThrough; // using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using Relu = ck::tensor_operation::element_wise::Relu; // using Gelu = ck::tensor_operation::element_wise::Gelu;
// using Relu = ck::tensor_operation::element_wise::Relu;
// using Silu = ck::tensor_operation::element_wise::Silu;
// using Sigmoid = ck::tensor_operation::element_wise::Sigmoid;
using AElementOp = PassThrough; using AElementOp = PassThrough;
using BElementOp = PassThrough; using BElementOp = PassThrough;
using CElementOp = ck::impl::AddActivation<Relu>;
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<A0DataType, using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<A0DataType,
B0DataType, B0DataType,
...@@ -139,6 +139,7 @@ int main(int argc, char* argv[]) ...@@ -139,6 +139,7 @@ int main(int argc, char* argv[])
bool do_verification = true; bool do_verification = true;
int init_method = 1; int init_method = 1;
bool time_kernel = true; bool time_kernel = true;
int op_type = 0;
// GEMM shape // GEMM shape
ck::index_t M = 64; ck::index_t M = 64;
...@@ -155,22 +156,24 @@ int main(int argc, char* argv[]) ...@@ -155,22 +156,24 @@ int main(int argc, char* argv[])
init_method = std::stoi(argv[2]); init_method = std::stoi(argv[2]);
time_kernel = std::stoi(argv[3]); time_kernel = std::stoi(argv[3]);
} }
else if(argc == 7) else if(argc == 8)
{ {
do_verification = std::stoi(argv[1]); do_verification = std::stoi(argv[1]);
init_method = std::stoi(argv[2]); init_method = std::stoi(argv[2]);
time_kernel = std::stoi(argv[3]); time_kernel = std::stoi(argv[3]);
M = std::stoi(argv[4]); M = std::stoi(argv[4]);
N = std::stoi(argv[5]); N = std::stoi(argv[5]);
K = std::stoi(argv[6]); K = std::stoi(argv[6]);
op_type = std::stoi(argv[7]);
} }
else else
{ {
printf("arg1: verification (0=no, 1=yes)\n"); printf("arg1: verification (0=no, 1=yes)\n");
printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"); printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
printf("arg3: time kernel (0=no, 1=yes)\n"); printf("arg3: time kernel (0=no, 1=yes)\n");
printf("arg4 to 9: M (256x), N(128x), K(32x)m\n"); printf("arg4 to 9: M (256x), N(128x), K(32x)m, op_type(Gelu = 0, Relu, Silu, Swiglu, "
"Geglu, Identity, GeluNoneApproximate, GeGluNoneApproximate)\n");
exit(0); exit(0);
} }
...@@ -235,7 +238,9 @@ int main(int argc, char* argv[]) ...@@ -235,7 +238,9 @@ int main(int argc, char* argv[])
N, N,
K}; K};
float ave_time = gemm_bias_add_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50}); float ave_time = gemm_bias_add_fp16(gemm_args,
StreamConfig{nullptr, time_kernel, 20, 50},
static_cast<ActivationType>(op_type));
// float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel, 20, 50}); // float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel, 20, 50});
std::size_t flop = std::size_t(2) * M * N * K; std::size_t flop = std::size_t(2) * M * N * K;
...@@ -253,9 +258,6 @@ int main(int argc, char* argv[]) ...@@ -253,9 +258,6 @@ int main(int argc, char* argv[])
if(do_verification) if(do_verification)
{ {
// RunUnfusedTest(a0_m_k.mData, b0_k_n.mData, d0_m_n.mData, e_m_n_host_result.mData, K, M,
// N);
auto ref_gemm = ReferenceGemmInstance{}; auto ref_gemm = ReferenceGemmInstance{};
auto ref_invoker = ref_gemm.MakeInvoker(); auto ref_invoker = ref_gemm.MakeInvoker();
...@@ -264,13 +266,31 @@ int main(int argc, char* argv[]) ...@@ -264,13 +266,31 @@ int main(int argc, char* argv[])
ref_invoker.Run(ref_argument); ref_invoker.Run(ref_argument);
CElementOp cde_element_op; auto run_elementwise = [&](auto cde_element_op) {
for(int m = 0; m < M; ++m) for(int m = 0; m < M; ++m)
{
for(int n = 0; n < N; ++n)
{ {
cde_element_op(e_m_n_host_result(m, n), e_m_n_host_result(m, n), d0_m_n(m, n)); for(int n = 0; n < N; ++n)
{
cde_element_op(e_m_n_host_result(m, n), e_m_n_host_result(m, n), d0_m_n(m, n));
}
} }
};
ActivationType type = static_cast<ActivationType>(op_type);
switch(type)
{
case ActivationType::Gelu:
case ActivationType::Geglu:
case ActivationType::GeluNoneApproximate:
case ActivationType::GeGluNoneApproximate:
run_elementwise(ck::impl::AddActivation<Gelu>{});
break;
case ActivationType::Relu: run_elementwise(ck::impl::AddActivation<Relu>{}); break;
case ActivationType::Silu:
case ActivationType::Swiglu: run_elementwise(ck::impl::AddActivation<Silu>{}); break;
case ActivationType::Sigmoid: run_elementwise(ck::impl::AddActivation<Sigmoid>{}); break;
case ActivationType::Identity:
case ActivationType::InvalidType:
default: break;
} }
e_device_buf.FromDevice(e_m_n_device_result.mData.data()); e_device_buf.FromDevice(e_m_n_device_result.mData.data());
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment