"docs/source/en/vscode:/vscode.git/clone" did not exist on "3eaead0c4a55bf11bdf832eaa61d0e87fe5464df"
Commit 0435c336 authored by letaoqin

refactor code

parent 52d31393
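In short: the commit removes the `ActivationType` enum and the single dispatching entry point `gemm_bias_add_fp16(args, config, op_type)` in favor of one function per activation, and hoists `DsLayout` to a file-scope alias so it no longer has to be threaded through every template. A minimal before/after call-site sketch (the argument values are illustrative, not taken from this diff):

    // Before: one entry point, selecting the activation via an enum value.
    float t0 = gemm_bias_add_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50},
                                  ActivationType::Relu);

    // After: the activation is encoded in the function name.
    float t1 = gemm_bias_add_relu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});

Every branch of the old switch instantiated the same `run_impl` with a different `AddActivation<...>` element-wise op, so the per-activation entry points make each instantiation explicit and drop the `Identity`/`InvalidType` fallthrough that silently returned 0.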
@@ -71,19 +71,6 @@ using Relu = ck::tensor_operation::element_wise::Relu;
 using Silu = ck::tensor_operation::element_wise::Silu;
 using Sigmoid = ck::tensor_operation::element_wise::Sigmoid;
-enum class ActivationType
-{
-    Gelu = 0,
-    Relu,
-    Silu,
-    Swiglu,
-    Geglu,
-    Sigmoid,
-    Identity,
-    GeluNoneApproximate,
-    GeGluNoneApproximate,
-    InvalidType
-};
 struct GemmBiasAddArgs
 {
     const void* mat_a;
@@ -95,6 +82,7 @@ struct GemmBiasAddArgs
     ck::index_t K;
 };
-float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
-                         const StreamConfig& config,
-                         ActivationType op_type);
+float gemm_bias_add_relu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_gelu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_silu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
+float gemm_bias_add_sigmoid_fp16(const GemmBiasAddArgs& args, const StreamConfig& config);
@@ -24,6 +24,7 @@ using ALayout = Row;
 using BLayout = Row;
 using D0Layout = Row;
 using CLayout = Row;
+using DsLayout = ck::Tuple<D0Layout>;
 using AElementOp = PassThrough;
 using BElementOp = PassThrough;
@@ -34,7 +35,7 @@ using S = ck::Sequence<Is...>;
 static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
 // clang-format off
-template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
+template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
 using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
     DsDataType, CDataType, AccDataType, CShuffleDataType,
@@ -51,7 +52,7 @@ using DeviceOpInstance_64_16_16_64 = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     1, 1,
     S<1, 16, 1, 4>, S<4, 4>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
-template <typename DsLayout, typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
+template <typename ADataType, typename BDataType, typename DsDataType, typename CDataType, typename CDEElementOp>
 using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     ALayout, BLayout, DsLayout, CLayout, ADataType, BDataType,
     DsDataType, CDataType, AccDataType, CShuffleDataType,
@@ -69,7 +70,7 @@ using DeviceOpInstance_default = ck::tensor_operation::device::DeviceGemmMultiD_Xdl_CShuffle_V3<
     S<1, 16, 1, 4>, S<2, 2>, ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v1, F16>;
 // clang-format on
-template <typename DsLayout, typename CDEElementOp>
+template <typename CDEElementOp>
 float run_impl(const GemmBiasAddArgs& args, const StreamConfig& config)
 {
     using ADataType = ck::half_t;
@@ -127,46 +128,31 @@ float run_impl(const GemmBiasAddArgs& args, const StreamConfig& config)
         return true;
     };
-    auto gemm = DeviceOpInstance_64_16_16_64<DsLayout,
-                                             ADataType,
-                                             BDataType,
-                                             DsDataType,
-                                             CDataType,
-                                             CDEElementOp>{};
+    auto gemm =
+        DeviceOpInstance_64_16_16_64<ADataType, BDataType, DsDataType, CDataType, CDEElementOp>{};
     if(!Run(gemm))
     {
-        auto gemm_def = DeviceOpInstance_default<DsLayout,
-                                                 ADataType,
-                                                 BDataType,
-                                                 DsDataType,
-                                                 CDataType,
-                                                 CDEElementOp>{};
+        auto gemm_def =
+            DeviceOpInstance_default<ADataType, BDataType, DsDataType, CDataType, CDEElementOp>{};
         Run(gemm_def);
     }
     return ave_time;
 }
-float gemm_bias_add_fp16(const GemmBiasAddArgs& args,
-                         const StreamConfig& config,
-                         ActivationType op_type)
-{
-    using DsLayout = ck::Tuple<D0Layout>;
-    switch(op_type)
-    {
-    case ActivationType::Gelu:
-    case ActivationType::Geglu:
-    case ActivationType::GeluNoneApproximate:
-    case ActivationType::GeGluNoneApproximate:
-        return run_impl<DsLayout, ck::impl::AddActivation<Gelu>>(args, config);
-    case ActivationType::Relu:
-        return run_impl<DsLayout, ck::impl::AddActivation<Relu>>(args, config);
-    case ActivationType::Silu:
-    case ActivationType::Swiglu:
-        return run_impl<DsLayout, ck::impl::AddActivation<Silu>>(args, config);
-    case ActivationType::Sigmoid:
-        return run_impl<DsLayout, ck::impl::AddActivation<Sigmoid>>(args, config);
-    case ActivationType::Identity:
-    case ActivationType::InvalidType:
-    default: return 0;
-    }
-}
+float gemm_bias_add_relu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Relu>>(args, config);
+}
+float gemm_bias_add_gelu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Gelu>>(args, config);
+}
+float gemm_bias_add_silu_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Silu>>(args, config);
+}
+float gemm_bias_add_sigmoid_fp16(const GemmBiasAddArgs& args, const StreamConfig& config)
+{
+    return run_impl<ck::impl::AddActivation<Sigmoid>>(args, config);
+}
@@ -172,8 +172,8 @@ int main(int argc, char* argv[])
         printf("arg1: verification (0=no, 1=yes)\n");
         printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
-        printf("arg4 to 9: M (256x), N(128x), K(32x)m, op_type(Gelu = 0, Relu, Silu, Swiglu, "
-               "Geglu, Identity, GeluNoneApproximate, GeGluNoneApproximate)\n");
+        printf("arg4 to 9: M (256x), N (128x), K (32x), op_type (Gelu = 0, Relu = 1, Silu = 2, "
+               "Sigmoid = 3)\n");
         exit(0);
     }
@@ -238,9 +238,15 @@ int main(int argc, char* argv[])
                                  N,
                                  K};
-    float ave_time = gemm_bias_add_fp16(gemm_args,
-                                        StreamConfig{nullptr, time_kernel, 20, 50},
-                                        static_cast<ActivationType>(op_type));
+    // Capture the measured time from whichever variant runs; discarding the return
+    // value would leave ave_time at 0 and report zero TFLOPS below.
+    float ave_time = 0;
+    if(op_type == 0)
+        ave_time = gemm_bias_add_gelu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else if(op_type == 1)
+        ave_time = gemm_bias_add_relu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else if(op_type == 2)
+        ave_time = gemm_bias_add_silu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
+    else
+        ave_time = gemm_bias_add_sigmoid_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});
     // float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel, 20, 50});
     std::size_t flop = std::size_t(2) * M * N * K;
@@ -275,23 +281,14 @@ int main(int argc, char* argv[])
             }
         }
     };
-    ActivationType type = static_cast<ActivationType>(op_type);
-    switch(type)
-    {
-    case ActivationType::Gelu:
-    case ActivationType::Geglu:
-    case ActivationType::GeluNoneApproximate:
-    case ActivationType::GeGluNoneApproximate:
-        run_elementwise(ck::impl::AddActivation<Gelu>{});
-        break;
-    case ActivationType::Relu: run_elementwise(ck::impl::AddActivation<Relu>{}); break;
-    case ActivationType::Silu:
-    case ActivationType::Swiglu: run_elementwise(ck::impl::AddActivation<Silu>{}); break;
-    case ActivationType::Sigmoid: run_elementwise(ck::impl::AddActivation<Sigmoid>{}); break;
-    case ActivationType::Identity:
-    case ActivationType::InvalidType:
-    default: break;
-    }
+    if(op_type == 0)
+        run_elementwise(ck::impl::AddActivation<Gelu>{});
+    else if(op_type == 1)
+        run_elementwise(ck::impl::AddActivation<Relu>{});
+    else if(op_type == 2)
+        run_elementwise(ck::impl::AddActivation<Silu>{});
+    else
+        run_elementwise(ck::impl::AddActivation<Sigmoid>{});
     e_device_buf.FromDevice(e_m_n_device_result.mData.data());
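For reference, a hedged end-to-end sketch of calling the new API. The diff shows only `mat_a`, `M`, `N`, and `K` of `GemmBiasAddArgs`; the remaining member names below (`mat_b`, `mat_bias`, `mat_c`) and the device pointers are hypothetical stand-ins for the fields the hunks elide:

    // Hypothetical: d_a, d_b, d_bias, d_c are device buffers already holding fp16 data;
    // mat_b/mat_bias/mat_c are stand-in names for struct fields not shown in the diff.
    GemmBiasAddArgs gemm_args{d_a,    // mat_a: A operand, row-major M x K
                              d_b,    // mat_b (hypothetical name): B operand
                              d_bias, // mat_bias (hypothetical name): D0 bias tensor
                              d_c,    // mat_c (hypothetical name): output, M x N
                              M,
                              N,
                              K};

    // Same StreamConfig settings main() uses above; the return value is the kernel
    // time that feeds the TFLOPS calculation.
    float ave_time = gemm_bias_add_silu_fp16(gemm_args, StreamConfig{nullptr, time_kernel, 20, 50});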