Unverified commit a768dea5 authored by Rostyslav Geyyer, committed by GitHub

Merge branch 'develop' into lwpck-471

parents 3f976dd0 0345963e
@@ -12,7 +12,6 @@
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 #include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward.hpp"
-#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_dl.hpp"
 #include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
@@ -199,93 +198,48 @@ bool profile_grouped_conv_fwd_impl(int do_verification,
         }
     };

-    // xdl
-    {
-        using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD<NDimSpatial,
-                                                                                     InLayout,
-                                                                                     WeiLayout,
-                                                                                     ck::Tuple<>,
-                                                                                     OutLayout,
-                                                                                     InDataType,
-                                                                                     WeiDataType,
-                                                                                     ck::Tuple<>,
-                                                                                     OutDataType,
-                                                                                     InElementOp,
-                                                                                     WeiElementOp,
-                                                                                     OutElementOp>;
-
-        // get device op instances
-        const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
-            DeviceOp>::GetInstances();
-
-        std::cout << "xdl found " << op_ptrs.size() << " instances" << std::endl;
-
-        for(auto& op_ptr : op_ptrs)
-        {
-            auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
-                                                            wei_device_buf.GetDeviceBuffer(),
-                                                            {},
-                                                            out_device_buf.GetDeviceBuffer(),
-                                                            a_g_n_c_wis_lengths,
-                                                            a_g_n_c_wis_strides,
-                                                            b_g_k_c_xs_lengths,
-                                                            b_g_k_c_xs_strides,
-                                                            {},
-                                                            {},
-                                                            e_g_n_k_wos_lengths,
-                                                            e_g_n_k_wos_strides,
-                                                            conv_filter_strides,
-                                                            conv_filter_dilations,
-                                                            input_left_pads,
-                                                            input_right_pads,
-                                                            in_element_op,
-                                                            wei_element_op,
-                                                            out_element_op);
-
-            run_impl(op_ptr, argument_ptr);
-        }
-    }
-
-    // dl
-    {
-        using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwd<NDimSpatial,
-                                                                            InLayout,
-                                                                            WeiLayout,
-                                                                            OutLayout,
-                                                                            InDataType,
-                                                                            WeiDataType,
-                                                                            OutDataType,
-                                                                            InElementOp,
-                                                                            WeiElementOp,
-                                                                            OutElementOp>;
-
-        // get device op instances
-        const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
-            DeviceOp>::GetInstances();
-
-        std::cout << "dl found " << op_ptrs.size() << " instances" << std::endl;
-
-        for(auto& op_ptr : op_ptrs)
-        {
-            auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
-                                                            wei_device_buf.GetDeviceBuffer(),
-                                                            out_device_buf.GetDeviceBuffer(),
-                                                            a_g_n_c_wis_lengths,
-                                                            a_g_n_c_wis_strides,
-                                                            b_g_k_c_xs_lengths,
-                                                            b_g_k_c_xs_strides,
-                                                            e_g_n_k_wos_lengths,
-                                                            e_g_n_k_wos_strides,
-                                                            conv_filter_strides,
-                                                            conv_filter_dilations,
-                                                            input_left_pads,
-                                                            input_right_pads,
-                                                            in_element_op,
-                                                            wei_element_op,
-                                                            out_element_op);
-
-            run_impl(op_ptr, argument_ptr);
-        }
-    }
+    using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD<NDimSpatial,
+                                                                                 InLayout,
+                                                                                 WeiLayout,
+                                                                                 ck::Tuple<>,
+                                                                                 OutLayout,
+                                                                                 InDataType,
+                                                                                 WeiDataType,
+                                                                                 ck::Tuple<>,
+                                                                                 OutDataType,
+                                                                                 InElementOp,
+                                                                                 WeiElementOp,
+                                                                                 OutElementOp>;
+
+    // get device op instances
+    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
+        DeviceOp>::GetInstances();
+
+    std::cout << "xdl found " << op_ptrs.size() << " instances" << std::endl;
+
+    for(auto& op_ptr : op_ptrs)
+    {
+        auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
+                                                        wei_device_buf.GetDeviceBuffer(),
+                                                        {},
+                                                        out_device_buf.GetDeviceBuffer(),
+                                                        a_g_n_c_wis_lengths,
+                                                        a_g_n_c_wis_strides,
+                                                        b_g_k_c_xs_lengths,
+                                                        b_g_k_c_xs_strides,
+                                                        {},
+                                                        {},
+                                                        e_g_n_k_wos_lengths,
+                                                        e_g_n_k_wos_strides,
+                                                        conv_filter_strides,
+                                                        conv_filter_dilations,
+                                                        input_left_pads,
+                                                        input_right_pads,
+                                                        in_element_op,
+                                                        wei_element_op,
+                                                        out_element_op);
+
+        run_impl(op_ptr, argument_ptr);
+    }

     std::cout << "Best configuration parameters:"
# ckProfiler
set(PROFILER_SOURCES
profiler.cpp
profile_gemm.cpp
profile_gemm_splitk.cpp
profile_gemm_bilinear.cpp
profile_gemm_bias_add_reduce.cpp
profile_gemm_add_add_fastgelu.cpp
profile_gemm_add_fastgelu.cpp
profile_gemm_fastgelu.cpp
profile_gemm_reduce.cpp
profile_batched_gemm.cpp
profile_batched_gemm_gemm.cpp
profile_batched_gemm_add_relu_gemm_add.cpp
profile_batched_gemm_reduce.cpp
profile_grouped_gemm.cpp
profile_conv_fwd.cpp
profile_conv_fwd_bias_relu.cpp
profile_conv_fwd_bias_relu_add.cpp
profile_conv_bwd_data.cpp
profile_grouped_conv_fwd.cpp
profile_grouped_conv_bwd_weight.cpp
profile_reduce.cpp
profile_groupnorm.cpp
profile_layernorm.cpp
profile_softmax.cpp
profile_batchnorm_fwd.cpp
profile_batchnorm_bwd.cpp
)
set(PROFILER_EXECUTABLE ckProfiler)
add_executable(${PROFILER_EXECUTABLE} ${PROFILER_SOURCES})
target_compile_options(${PROFILER_EXECUTABLE} PRIVATE -Wno-global-constructors)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE utility)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_splitk_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_add_fastgelu_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_fastgelu_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_fastgelu_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_reduce_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bias_add_reduce_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_gemm_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_add_relu_gemm_add_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_reduce_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_gemm_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_fwd_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv1d_bwd_data_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_bwd_data_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv3d_bwd_data_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv1d_bwd_weight_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_weight_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_weight_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_add_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_softmax_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_reduce_instance)
target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
rocm_install(TARGETS ${PROFILER_EXECUTABLE} COMPONENT profiler)
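
Note: the target above installs as ckProfiler. Assuming the profiler's main entry point (not part of this diff) looks up argv[1] in the operation registry introduced below and forwards the remaining arguments, a typical invocation would follow the help text each profile_*.cpp prints, e.g. for the gemm_add_fastgelu entry point shown in full further down:

    ckProfiler gemm_add_fastgelu 1 1 1 2 0 1 1024 1024 1024 -1 -1 -1 -1

(fp16 data, MK_NK_MN_MN layout, verification on, decimal initialization, no tensor dump, kernel timing on, M = N = K = 1024; the negative strides fall back to the layout-derived defaults computed in that source). The command line is illustrative, not taken from the commit.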
@@ -7,7 +7,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_batched_gemm_impl.hpp"
+#include "profiler/profile_batched_gemm_impl.hpp"
+#include "profiler_operation_registry.hpp"

 enum struct GemmMatrixLayout
 {
@@ -25,12 +26,15 @@ enum struct GemmDataType
     INT8_INT8_INT8, // 3
 };

+#define OP_NAME "batched_gemm"
+#define OP_DESC "Batched GEMM"
+
 int profile_batched_gemm(int argc, char* argv[])
 {
     if(argc != 18)
     {
         // clang-format off
-        printf("arg1: tensor operation (batched_gemm: Batched GEMM)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16, 2: bf16, 3: int8)\n");
         printf("arg3: matrix layout (0: A[g, m, k] * B[g, k, n] = C[g, m, n];\n");
         printf("                     1: A[g, m, k] * B[g, n, k] = C[g, m, n];\n");
@@ -195,3 +199,5 @@ int profile_batched_gemm(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_batched_gemm);
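
The header profiler_operation_registry.hpp itself is not part of this diff. What follows is a minimal sketch, assuming REGISTER_PROFILER_OPERATION follows the common static-registration idiom; all names in it are hypothetical stand-ins, not the real API. A file-scope registrar object inserts each entry point into a function-local map before main() runs, which would also explain the -Wno-global-constructors option passed to the profiler target in the CMake file above.

    // Hypothetical sketch of a profiler operation registry; the real
    // profiler_operation_registry.hpp may differ.
    #include <functional>
    #include <map>
    #include <string>

    using ProfilerOperation = std::function<int(int, char*[])>;

    // Function-local static avoids the static-initialization-order fiasco:
    // the map is constructed on first use, before any registrar touches it.
    inline std::map<std::string, ProfilerOperation>& profiler_operation_table()
    {
        static std::map<std::string, ProfilerOperation> table;
        return table;
    }

    struct ProfilerOperationRegistrar
    {
        ProfilerOperationRegistrar(const std::string& name, ProfilerOperation op)
        {
            profiler_operation_table().emplace(name, std::move(op));
        }
    };

    // A registration macro along these lines expands to a file-scope object
    // whose constructor runs before main(), adding the entry point under its
    // name. (desc could additionally feed the profiler's help listing.)
    #define REGISTER_PROFILER_OPERATION_SKETCH(name, desc, fn) \
        static ProfilerOperationRegistrar fn##_registrar{name, fn}

Under this scheme, main() simply looks up argv[1] in the table and invokes the matching entry point, so adding an operation needs no edits to a central dispatch switch.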
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_batched_gemm_add_relu_gemm_add_impl.hpp"
+#include "profiler/profile_batched_gemm_add_relu_gemm_add_impl.hpp"
+#include "profiler_operation_registry.hpp"

 using F16 = ck::half_t;
 using F32 = float;
@@ -14,6 +15,9 @@ using F32 = float;
 using Row = ck::tensor_layout::gemm::RowMajor;
 using Col = ck::tensor_layout::gemm::ColumnMajor;

+#define OP_NAME "batched_gemm_add_relu_gemm_add"
+#define OP_DESC "Batched GEMM+Add+Relu+GEMM+Add"
+
 int profile_batched_gemm_add_relu_gemm_add(int argc, char* argv[])
 {
     enum struct GemmMatrixLayout
@@ -109,8 +113,7 @@ int profile_batched_gemm_add_relu_gemm_add(int argc, char* argv[])
     }
     else
     {
-        printf("arg1: tensor operation (batched_gemm_add_relu_gemm_add: "
-               "Batched_GEMM+Add+Relu+Gemm+Add)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (1: fp16)\n");
         printf("arg3: matrix layout (0: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[n, o] + D1[m, o] "
                "= E1[m, o]; 1: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[o, n] + D1[m, o] = "
@@ -207,3 +210,5 @@ int profile_batched_gemm_add_relu_gemm_add(int argc, char* argv[])
         return 0;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_batched_gemm_add_relu_gemm_add);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_batched_gemm_gemm_impl.hpp"
+#include "profiler/profile_batched_gemm_gemm_impl.hpp"
+#include "profiler_operation_registry.hpp"

 using F16 = ck::half_t;
 using F32 = float;
@@ -14,6 +15,9 @@ using F32 = float;
 using Row = ck::tensor_layout::gemm::RowMajor;
 using Col = ck::tensor_layout::gemm::ColumnMajor;

+#define OP_NAME "batched_gemm_gemm"
+#define OP_DESC "Batched GEMM+GEMM"
+
 int profile_batched_gemm_gemm(int argc, char* argv[])
 {
     enum struct GemmMatrixLayout
@@ -101,7 +105,7 @@ int profile_batched_gemm_gemm(int argc, char* argv[])
     }
     else
     {
-        printf("arg1: tensor operation (batched_gemm_gemm: Batched_GEMM+Gemm)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (1: fp16)\n");
         printf("arg3: matrix layout (0: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[n, o] + D1[m, o] "
                "= E1[m, o]; 1: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[o, n] + D1[m, o] = E1[m, "
@@ -179,3 +183,5 @@ int profile_batched_gemm_gemm(int argc, char* argv[])
         return 0;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_batched_gemm_gemm);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_batched_gemm_reduce_impl.hpp"
+#include "profiler/profile_batched_gemm_reduce_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "batched_gemm_reduce"
+#define OP_DESC "Batched GEMM+Reduce"

 int profile_batched_gemm_reduce(int argc, char* argv[])
 {
@@ -26,7 +30,7 @@ int profile_batched_gemm_reduce(int argc, char* argv[])

     if(argc != 15)
     {
-        printf("arg1: tensor operation (batched_gemm_reduce: BatchedGEMM+Reduce)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -151,3 +155,5 @@ int profile_batched_gemm_reduce(int argc, char* argv[])
         return 0;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_batched_gemm_reduce);
@@ -6,7 +6,8 @@
 #include <getopt.h>

 #include "ck/library/utility/host_common_util.hpp"
-#include "profiler/include/profile_batchnorm_backward_impl.hpp"
+#include "profiler/profile_batchnorm_backward_impl.hpp"
+#include "profiler_operation_registry.hpp"

 using ck::index_t;
@@ -202,3 +203,5 @@ int profile_batchnorm_backward(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION("bnorm_bwd", "Batchnorm backward", profile_batchnorm_backward);
@@ -6,7 +6,8 @@
 #include <getopt.h>

 #include "ck/library/utility/host_common_util.hpp"
-#include "profiler/include/profile_batchnorm_forward_impl.hpp"
+#include "profiler/profile_batchnorm_forward_impl.hpp"
+#include "profiler_operation_registry.hpp"

 using ck::index_t;
@@ -214,3 +215,5 @@ int profile_batchnorm_forward(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION("bnorm_fwd", "Batchnorm forward", profile_batchnorm_forward);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_conv_bwd_data_impl.hpp"
+#include "profiler/profile_conv_bwd_data_impl.hpp"
+#include "profiler_operation_registry.hpp"

 namespace {
@@ -24,10 +25,13 @@ enum struct ConvDataType
     INT8_INT8_INT8, // 3
 };

+#define OP_NAME "conv_bwd_data"
+#define OP_DESC "Convolution Backward Data"
+
 static void print_helper_msg()
 {
     std::cout
-        << "arg1: tensor operation (conv_bwd_data: Convolution Backward Data)\n"
+        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
         << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
         << "                 1: Input fp16, Weight fp16, Output fp16\n"
         << "                 2: Input bf16, Weight bf16, Output bf16\n"
@@ -182,3 +186,5 @@ int profile_conv_bwd_data(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_bwd_data);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_conv_fwd_impl.hpp"
+#include "profiler/profile_conv_fwd_impl.hpp"
+#include "profiler_operation_registry.hpp"

 namespace {
@@ -24,11 +25,14 @@ enum struct ConvDataType
     INT8_INT8_INT8, // 3
 };

+#define OP_NAME "conv_fwd"
+#define OP_DESC "Convolution Forward"
+
 static void print_helper_msg()
 {
     std::cout
         // clang-format-off
-        << "arg1: tensor operation (conv_fwd: Convolution Forward)\n"
+        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
         << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
         << "                 1: Input fp16, Weight fp16, Output fp16\n"
         << "                 2: Input bf16, Weight bf16, Output bf16\n"
@@ -184,3 +188,5 @@ int profile_conv_fwd(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_conv_fwd_bias_relu_impl.hpp"
+#include "profiler/profile_conv_fwd_bias_relu_impl.hpp"
+#include "profiler_operation_registry.hpp"

 enum struct ConvDataType
 {
@@ -32,11 +33,14 @@ enum struct ConvOutputLayout
     NHWK, // 1
 };

+#define OP_NAME "conv_fwd_bias_relu"
+#define OP_DESC "Convolution Forward+Bias+ReLU"
+
 int profile_conv_fwd_bias_relu(int argc, char* argv[])
 {
     if(argc != 25)
     {
-        printf("arg1: tensor operation (conv_fwd_bias_relu: ForwardConvolution+Bias+ReLu)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -114,3 +118,5 @@ int profile_conv_fwd_bias_relu(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd_bias_relu);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_conv_fwd_bias_relu_add_impl.hpp"
+#include "profiler/profile_conv_fwd_bias_relu_add_impl.hpp"
+#include "profiler_operation_registry.hpp"

 enum struct ConvDataType
 {
@@ -32,12 +33,14 @@ enum struct ConvOutputLayout
     NHWK, // 1
 };

+#define OP_NAME "conv_fwd_bias_relu_add"
+#define OP_DESC "Convolution Forward+Bias+ReLU+Add"
+
 int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
 {
     if(argc != 25)
     {
-        printf(
-            "arg1: tensor operation (conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLu+Add)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -115,3 +118,5 @@ int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd_bias_relu_add);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_gemm_impl.hpp"
+#include "profiler/profile_gemm_impl.hpp"
+#include "profiler_operation_registry.hpp"

 enum struct GemmMatrixLayout
 {
@@ -24,9 +25,12 @@ enum struct GemmDataType
     INT8_INT8_INT8, // 3
 };

+#define OP_NAME "gemm"
+#define OP_DESC "GEMM"
+
 static void print_helper_msg()
 {
-    std::cout << "arg1: tensor operation (gemm: GEMM)\n"
+    std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
               << "arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n"
               << "arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n"
               << "                     1: A[m, k] * B[n, k] = C[m, n];\n"
@@ -184,3 +188,5 @@ int profile_gemm(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>

-#include "profiler/include/profile_gemm_add_add_fastgelu_impl.hpp"
+#include "profiler/profile_gemm_add_add_fastgelu_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "gemm_add_add_fastgelu"
+#define OP_DESC "GEMM+Add+Add+FastGeLU"

 int profile_gemm_add_add_fastgelu(int argc, char* argv[])
 {
@@ -29,7 +33,7 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
     if(argc != 16)
     {
         // clang-format off
-        printf("arg1: tensor operation (gemm_add_add_fastgelu: GEMM+Add+Add+FastGeLU)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
         printf("arg3: matrix layout (0: E[m, n] = FastGeLU(A[m, k] * B[k, n] + D0[m, n] + D1[m, n]);\n");
         printf("                     1: E[m, n] = FastGeLU(A[m, k] * B[n, k] + D0[m, n] + D1[m, n]);\n");
@@ -150,3 +154,5 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_add_fastgelu);
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include "profiler/profile_gemm_add_fastgelu_impl.hpp"
#include "profiler_operation_registry.hpp"
#define OP_NAME "gemm_add_fastgelu"
#define OP_DESC "GEMM+Add+FastGeLU"
int profile_gemm_add_fastgelu(int argc, char* argv[])
{
enum struct MatrixLayout
{
MK_KN_MN_MN, // 0
MK_NK_MN_MN, // 1
KM_KN_MN_MN, // 2
KM_NK_MN_MN, // 3
};
enum struct MatrixDataType
{
F32_F32_F32_F32, // 0
F16_F16_F16_F16, // 1
BF16_BF16_BF16_BF16, // 2
INT8_INT8_INT8_INT8, // 3
};
if(argc != 15)
{
// clang-format off
printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
printf("arg3: matrix layout (0: E[m, n] = FastGeLU(A[m, k] * B[k, n] + D0[m, n]);\n");
printf(" 1: E[m, n] = FastGeLU(A[m, k] * B[n, k] + D0[m, n]);\n");
printf(" 2: E[m, n] = FastGeLU(A[k, m] * B[k, n] + D0[m, n]);\n");
printf(" 3: E[m, n] = FastGeLU(A[k, m] * B[n, k] + D0[m, n]))\n");
printf("arg4: verification (0: no; 1: yes)\n");
printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
printf("arg6: print tensor value (0: no; 1: yes)\n");
printf("arg7: time kernel (0=no, 1=yes)\n");
printf("arg8 to 14: M, N, K, StrideA, StrideB, StrideD0, StrideE\n");
// clang-format on
exit(1);
}
const auto data_type = static_cast<MatrixDataType>(std::stoi(argv[2]));
const auto layout = static_cast<MatrixLayout>(std::stoi(argv[3]));
const bool do_verification = std::stoi(argv[4]);
const int init_method = std::stoi(argv[5]);
const bool do_log = std::stoi(argv[6]);
const bool time_kernel = std::stoi(argv[7]);
const int M = std::stoi(argv[8]);
const int N = std::stoi(argv[9]);
const int K = std::stoi(argv[10]);
const int StrideA = std::stoi(argv[11]);
const int StrideB = std::stoi(argv[12]);
const int StrideD0 = std::stoi(argv[13]);
const int StrideE = std::stoi(argv[14]);
using F16 = ck::half_t;
using F32 = float;
using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;
auto profile = [&](auto a_type,
auto b_type,
auto acc_type,
auto d0_type,
auto e_type,
auto a_layout,
auto b_layout,
auto d0_layout,
auto e_layout) {
using ADataType = decltype(a_type);
using BDataType = decltype(b_type);
using AccDataType = decltype(acc_type);
using D0DataType = decltype(d0_type);
using EDataType = decltype(e_type);
using ALayout = decltype(a_layout);
using BLayout = decltype(b_layout);
using D0Layout = decltype(d0_layout);
using ELayout = decltype(e_layout);
const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
const int DefaultStrideE = ck::is_same_v<ELayout, Row> ? N : M;
bool pass = ck::profiler::profile_gemm_add_fastgelu_impl<ADataType,
BDataType,
AccDataType,
D0DataType,
EDataType,
ALayout,
BLayout,
D0Layout,
ELayout>(
do_verification,
init_method,
do_log,
time_kernel,
M,
N,
K,
(StrideA < 0) ? DefaultStrideA : StrideA,
(StrideB < 0) ? DefaultStrideB : StrideB,
(StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
(StrideE < 0) ? DefaultStrideE : StrideE);
return pass ? 0 : 1;
};
if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::MK_NK_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Col{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::KM_KN_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Row{}, Row{}, Row{});
}
else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::KM_NK_MN_MN)
{
return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Col{}, Row{}, Row{});
}
else
{
std::cout << "this data_type & layout is not implemented" << std::endl;
return 1;
}
}
REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_fastgelu);
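
The profile lambda in this new file dispatches on types via empty value tags: F16{}, Row{}, and the rest are passed as values, and the generic lambda recovers their types with decltype before instantiating the templated implementation. A self-contained sketch of the idiom, using stand-in tag types rather than the CK definitions:

    // Minimal illustration of tag dispatch as used in profile_gemm_add_fastgelu:
    // empty tag values carry types into a generic lambda, which recovers them
    // with decltype. Row/Col here are stand-ins for the ck layout tags.
    #include <iostream>
    #include <type_traits>

    struct Row {};
    struct Col {};

    template <typename Layout>
    int default_stride(int rows, int cols)
    {
        // Row-major tensors stride by the column count, column-major by the row count.
        return std::is_same_v<Layout, Row> ? cols : rows;
    }

    int main()
    {
        auto profile = [](auto a_layout, int m, int k) {
            using ALayout = decltype(a_layout); // recover the type from the value tag
            std::cout << "StrideA defaults to " << default_stride<ALayout>(m, k) << '\n';
        };

        profile(Row{}, 1024, 256); // prints 256  (row-major: stride = K)
        profile(Col{}, 1024, 256); // prints 1024 (column-major: stride = M)
    }

This keeps a single generic implementation while the if/else chain at the bottom of the file maps the runtime (data_type, layout) pair onto compile-time template parameters, exactly as the DefaultStrideA/B/D0/E computation does with ck::is_same_v.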