Commit 0435c336 authored by letaoqin

refactor: split gemm_bias_add_fp16 into per-activation entry points and drop the ActivationType enum

parent 52d31393
@@ -71,19 +71,6 @@
 using Relu = ck::tensor_operation::element_wise::Relu;
 using Silu = ck::tensor_operation::element_wise::Silu;
 using Sigmoid = ck::tensor_operation::element_wise::Sigmoid;
-enum class ActivationType
-{
-    Gelu = 0,
-    Relu,
-    Silu,
-    Swiglu,
-    Geglu,
-    Sigmoid,
-    Identity,
-    GeluNoneApproximate,
-    GeGluNoneApproximate,
-    InvalidType
-};
 struct GemmBiasAddArgs
 {
     const void* mat_a;
@@ -95,6 +82,7 @@ struct GemmBiasAddArgs
     ck::index_t K;
 };
-float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
-                         const StreamConfig& config,
-                         ActivationType op_type);
+float gemm_bias_add_relu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_gelu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_silu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_sigmoid_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
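
A minimal caller-side sketch of the refactored header interface. The buffer names and the GemmBiasAddArgs field order (mat_a, mat_b, bias, output, M, N, K) are assumptions for illustration, since the struct body is collapsed above; the StreamConfig values match the benchmark driver later in this commit:

// Sketch only: assumes this header is included and that dev_a/dev_b/dev_bias/
// dev_out are pre-allocated HIP device buffers.
float launch_gelu_example(const void* dev_a, const void* dev_b,
                          const void* dev_bias, void* dev_out)
{
    // Field order is assumed; only mat_a, M, N, K are visible in the diff.
    GemmBiasAddArgs args{dev_a, dev_b, dev_bias, dev_out,
                         /*M=*/256, /*N=*/128, /*K=*/32};
    // Same StreamConfig values as the benchmark driver in this commit.
    StreamConfig config{nullptr, /*time_kernel=*/true, 20, 50};

    // One entry point per activation replaces the old ActivationType switch.
    return gemm_bias_add_gelu_fp16(args, config);
}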
@@ -24,6 +24,7 @@
 using ALayout = Row;
 using BLayout = Row;
 using D0Layout = Row;
 using CLayout = Row;
+using DsLayout = ck::Tuple<D0Layout>;
 using AElementOp = PassThrough;
 using BElementOp = PassThrough;
@@ -34,7 +35,7 @@
 using S = ck::Sequence<Is...>;
 static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
 // clang-format off
-template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
+template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
 using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
     DsDataType, CDataType, AccDataType, CShuffleDataType,
@@ -51,7 +52,7 @@ using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMul
     1, 1,
     S<1, 16, 1, 4>, S<4, 4>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
-template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
+template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
 using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
     DsDataType, CDataType, AccDataType, CShuffleDataType,
@@ -69,7 +70,7 @@ using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_
     S<1, 16, 1, 4>, S<2, 2>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
 // clang-format on
-template <typename DsLayout, typename CDEElementOp>
+template <typename CDEElementOp>
 float run_impl(const GemmBiasAddArgs& args, const StreamConfig& config)
 {
     using ADataType = ck::half_t;
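
The DsLayout template parameter can be dropped from DeviceOpInstance_64_16_16_64, DeviceOpInstance_default, and run_impl because the new file-scope alias (using DsLayout = ck::Tuple<D0Layout>;) is now visible to all of them. A standalone sketch of the hoisting pattern, with generic stand-in names rather than CK types:

#include <tuple>

struct Row {}; // stand-in layout tag

// Before the refactor: the layout travels through every template.
template <typename DsLayout, typename DataType>
struct DeviceOpBefore {};

// After: one file-scope alias, picked up implicitly by later templates.
using DsLayout = std::tuple<Row>;

template <typename DataType>
struct DeviceOpAfter
{
    using Layouts = DsLayout; // resolved at namespace scope
};

int main()
{
    DeviceOpBefore<DsLayout, float> before{}; // layout repeated at every use
    DeviceOpAfter<float> after{};             // layout comes from file scope
    (void)before;
    (void)after;
}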
@@ -127,46 +128,31 @@ float run_impl(const GemmBiasAddArgs& args, const StreamConfig& config)
         return true;
     };
-    auto gemm = DeviceOpInstance_64_16_16_64<DsLayout,
-                                             ADataType,
-                                             BDataType,
-                                             DsDataType,
-                                             CDataType,
-                                             CDEElementOp>{};
+    auto gemm =
+        DeviceOpInstance_64_16_16_64<ADataType, BDataType, DsDataType, CDataType, CDEElementOp>{};
     if(!Run(gemm))
     {
-        auto gemm_def = DeviceOpInstance_default<DsLayout,
-                                                 ADataType,
-                                                 BDataType,
-                                                 DsDataType,
-                                                 CDataType,
-                                                 CDEElementOp>{};
+        auto gemm_def =
+            DeviceOpInstance_default<ADataType, BDataType, DsDataType, CDataType, CDEElementOp>{};
         Run(gemm_def);
     }
     return ave_time;
 }
-float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
-                         const StreamConfig& config,
-                         ActivationType op_type)
+float gemm_bias_add_relu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
 {
-    using DsLayout = ck::Tuple<D0Layout>;
-    switch(op_type)
-    {
-    case ActivationType::Gelu:
-    case ActivationType::Geglu:
-    case ActivationType::GeluNoneApproximate:
-    case ActivationType::GeGluNoneApproximate:
-        return run_impl<DsLayout, ck::impl::AddActivation<Gelu>>(args, config);
-    case ActivationType::Relu:
-        return run_impl<DsLayout, ck::impl::AddActivation<Relu>>(args, config);
-    case ActivationType::Silu:
-    case ActivationType::Swiglu:
-        return run_impl<DsLayout, ck::impl::AddActivation<Silu>>(args, config);
-    case ActivationType::Sigmoid:
-        return run_impl<DsLayout, ck::impl::AddActivation<Sigmoid>>(args, config);
-    case ActivationType::Identity:
-    case ActivationType::InvalidType:
-    default: return 0;
-    }
+    return run_impl<ck::impl::AddActivation<Relu>>(args, config);
 }
+float gemm_bias_add_gelu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Gelu>>(args, config);
+}
+float gemm_bias_add_silu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Silu>>(args, config);
+}
+float gemm_bias_add_sigmoid_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Sigmoid>>(args, config);
+}
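
Each wrapper binds one activation to ck::impl::AddActivation<Op>, which fuses the bias add with the activation in the GEMM epilogue. A rough standalone approximation of the functor's shape, following the out-parameter convention of CK's elementwise ops but not reproducing CK's actual implementation:

#include <algorithm>

// Conceptual stand-in for a CK elementwise activation.
struct ReluSketch
{
    void operator()(float& y, const float& x) const { y = std::max(x, 0.0f); }
};

// Conceptual stand-in for ck::impl::AddActivation<Op>:
// e = activation(c + d0), where c is the GEMM accumulator and d0 the bias.
template <typename Activation>
struct AddActivationSketch
{
    void operator()(float& e, const float& c, const float& d0) const
    {
        float sum = c + d0;
        Activation{}(e, sum);
    }
};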
@@ -172,8 +172,8 @@ int main(int argc, char* argv[])
         printf("arg1: verification (0=no, 1=yes)\n");
         printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
         printf("arg3: time kernel (0=no, 1=yes)\n");
-        printf("arg4 to 9: M (256x), N(128x), K(32x)m, op_type(Gelu = 0, Relu, Silu, Swiglu, "
-               "Geglu, Identity, GeluNoneApproximate, GeGluNoneApproximate)\n");
+        printf("arg4 to 9: M (256x), N(128x), K(32x), op_type(Gelu = 0, Relu = 1, Silu = 2, "
+               "Sigmoid = 3)\n");
         exit(0);
     }
@@ -238,9 +238,15 @@ int main(int argc, char* argv[])
                                   N,
                                   K};
-    float ave_time = gemm_bias_add_fp16(gemm_args,
-                                        StreamConfig{nullptr, time_kernel, 20, 50},
-                                        static_cast<ActivationType>(op_type));
+    float ave_time = 0;
+    if(op_type == 0)
+        ave_time = gemm_bias_add_gelu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else if(op_type == 1)
+        ave_time = gemm_bias_add_relu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else if(op_type == 2)
+        ave_time = gemm_bias_add_silu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else
+        ave_time = gemm_bias_add_sigmoid_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
     // float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel, 20, 50});
     std::size_t flop = std::size_t(2) * M * N * K;
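
The flop count charges one multiply and one add per multiply-accumulate, hence the factor of 2. A worked example using the smallest sizes the usage text allows (M=256, N=128, K=32, hypothetical here) and the TFLOP/s convention CK examples derive from a kernel time in milliseconds:

#include <cstddef>
#include <cstdio>

int main()
{
    std::size_t M = 256, N = 128, K = 32;          // hypothetical problem sizes
    std::size_t flop = std::size_t(2) * M * N * K; // 2,097,152 FLOPs

    float ave_time = 0.05f; // assumed kernel time in milliseconds
    // FLOPs / 1e9 / ms == TFLOP/s, since 1e9 FLOP per ms is 1e12 FLOP per s.
    float tflops = static_cast<float>(flop) / 1.0e9f / ave_time; // ~0.042
    std::printf("%.3f TFLOP/s\n", tflops);
    return 0;
}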
@@ -275,23 +281,14 @@ int main(int argc, char* argv[])
             }
         }
     };
-    ActivationType type = static_cast<ActivationType>(op_type);
-    switch(type)
-    {
-    case ActivationType::Gelu:
-    case ActivationType::Geglu:
-    case ActivationType::GeluNoneApproximate:
-    case ActivationType::GeGluNoneApproximate:
+    if(op_type == 0)
         run_elementwise(ck::impl::AddActivation<Gelu>{});
-        break;
-    case ActivationType::Relu: run_elementwise(ck::impl::AddActivation<Relu>{}); break;
-    case ActivationType::Silu:
-    case ActivationType::Swiglu: run_elementwise(ck::impl::AddActivation<Silu>{}); break;
-    case ActivationType::Sigmoid: run_elementwise(ck::impl::AddActivation<Sigmoid>{}); break;
-    case ActivationType::Identity:
-    case ActivationType::InvalidType:
-    default: break;
-    }
+    else if(op_type == 1)
+        run_elementwise(ck::impl::AddActivation<Relu>{});
+    else if(op_type == 2)
+        run_elementwise(ck::impl::AddActivation<Silu>{});
+    else
+        run_elementwise(ck::impl::AddActivation<Sigmoid>{});
     e_device_buf.FromDevice(e_m_n_device_result.mData.data());
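
The host-side verification dispatches on the same op_type so the reference path applies an identical epilogue functor. A hypothetical sketch of what a per-element comparison behind run_elementwise could look like; the function name, bias broadcast, and tolerance are assumptions, not this file's code:

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical reference check: apply the same add-activation functor on the
// host, then compare against the output copied back from the device.
template <typename Functor>
bool check_reference(const std::vector<float>& c,        // host GEMM result
                     const std::vector<float>& bias,     // one value per column
                     const std::vector<float>& e_device, // copied-back output
                     std::size_t N,
                     Functor f,
                     float tol = 1e-3f)
{
    for(std::size_t i = 0; i < c.size(); ++i)
    {
        float ref = 0.0f;
        f(ref, c[i], bias[i % N]); // assumed row-major bias broadcast over N
        if(std::fabs(ref - e_device[i]) > tol)
            return false;
    }
    return true;
}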