Commit ca313a29 authored by letaoqin

Merge branch 'develop' into dl_conv_multiple_d

parents d47bf127 8784a72e
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <limits>
#include <vector>
#include <getopt.h>

#include "ck/library/utility/host_common_util.hpp"
#include "profiler/profile_batchnorm_backward_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;
using namespace std;

static const struct option long_options[] = {{"inOutLengths", required_argument, nullptr, 'D'},
                                             {"reduceDims", required_argument, nullptr, 'R'},
                                             {"dumpout", required_argument, nullptr, 'o'},
                                             {"verify", required_argument, nullptr, 'v'},
                                             {"help", no_argument, nullptr, '?'},
                                             {nullptr, 0, nullptr, 0}};

class BatchnormBwdArgParser
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inLengths;
    std::vector<int> reduceDims;

    bool do_verification     = false;
    bool do_dumpout          = false;
    bool haveSavedMeanInvVar = false;

    int data_type    = 0;
    int init_method  = 2;
    bool time_kernel = false;

    BatchnormBwdArgParser()  = default;
    ~BatchnormBwdArgParser() = default;

    void show_usage(const char* cmd)
    {
        // clang-format off
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inOutLengths or -D, comma separated list of input tensor dimension lengths, must have 4 integers for nhwc" << std::endl;
        std::cout << "--reduceDims or -R, comma separated list of dimensions to reduce on" << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the result by comparing with the host-based batch-normalization" << std::endl;
        std::cout << "--dumpout or -o, 1/0 to indicate whether to dump the result tensors to files" << std::endl;
        std::cout << "Arg1 -- data type (0: fp16, 1: fp32, 5: bf16, 6: fp64)" << std::endl;
        std::cout << "Arg2 -- 1/0 to indicate whether to use saved mean and invVariance" << std::endl;
        std::cout << "Arg3 -- init method used for dy and bnScale (0=no init, 1=single integer value, 2=scoped integer value, 3=decimal value)" << std::endl;
        std::cout << "Arg4 -- time kernel (0=no, 1=yes)" << std::endl;
        // clang-format on
    };

    int operator()(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        optind++; // to skip the module name
        while(1)
        {
            ch = getopt_long(argc, argv, "D:R:v:o:", long_options, &option_index);
            if(ch == -1)
                break;
            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                inLengths = getTypeValuesFromString<size_t>(optarg);
                break;
            case 'R':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                reduceDims = getTypeValuesFromString<int>(optarg);
                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case 'o':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                do_dumpout = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return -1;
                }
                break;
            default:
                show_usage(argv[0]);
                std::cerr << "Invalid cmd-line options!" << std::endl;
                return -1;
            }
        }

        if(optind + 4 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        data_type           = std::atoi(argv[optind++]);
        haveSavedMeanInvVar = static_cast<bool>(std::atoi(argv[optind++]));
        init_method         = std::atoi(argv[optind++]);
        time_kernel         = static_cast<bool>(std::atoi(argv[optind++]));

        // only the data types listed in the usage message are supported
        if(data_type != 0 && data_type != 1 && data_type != 5 && data_type != 6)
            return -1;

        return 0;
    };
}; // end of class BatchnormBwdArgParser

static const double epsilon = std::numeric_limits<float>::epsilon();

int profile_batchnorm_backward(int argc, char* argv[])
{
    using ck::profiler::profile_batchnorm_backward_impl;

    BatchnormBwdArgParser arg_parser;

    if(arg_parser(argc, argv) != 0)
        return -1;

    using F16  = ck::half_t;
    using F32  = float;
    using BF16 = ck::bhalf_t;
    using F64  = double;

    if(arg_parser.data_type == 0)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_backward_impl<F16, F32, F32, F32, F16, F32, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.haveSavedMeanInvVar,
                epsilon);
        }
    }
    else if(arg_parser.data_type == 1)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_backward_impl<F32, F32, F32, F32, F32, F32, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.haveSavedMeanInvVar,
                epsilon);
        }
    }
    else if(arg_parser.data_type == 5)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_backward_impl<BF16, F32, F32, F32, BF16, F32, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.haveSavedMeanInvVar,
                epsilon);
        }
    }
    else if(arg_parser.data_type == 6)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_backward_impl<F64, F64, F64, F64, F64, F64, F64, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.haveSavedMeanInvVar,
                epsilon);
        }
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("bnorm_bwd", "Batchnorm backward", profile_batchnorm_backward);
@@ -6,7 +6,8 @@
 #include <getopt.h>
 #include "ck/library/utility/host_common_util.hpp"
-#include "profiler/include/profile_batchnorm_forward_impl.hpp"
+#include "profiler/profile_batchnorm_forward_impl.hpp"
+#include "profiler_operation_registry.hpp"
 using ck::index_t;
@@ -214,3 +215,5 @@ int profile_batchnorm_forward(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION("bnorm_fwd", "Batchnorm forward", profile_batchnorm_forward);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_conv_bwd_data_impl.hpp"
+#include "profiler/profile_conv_bwd_data_impl.hpp"
+#include "profiler_operation_registry.hpp"
 namespace {
@@ -24,10 +25,13 @@ enum struct ConvDataType
     INT8_INT8_INT8, // 3
 };
+#define OP_NAME "conv_bwd_data"
+#define OP_DESC "Convolution Backward Data"
 static void print_helper_msg()
 {
     std::cout
-        << "arg1: tensor operation (conv_bwd_data: Convolution Backward Data)\n"
+        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
         << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                  1: Input fp16, Weight fp16, Output fp16\n"
        << "                  2: Input bf16, Weight bf16, Output bf16\n"
@@ -182,3 +186,5 @@ int profile_conv_bwd_data(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_bwd_data);
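
The OP_NAME/OP_DESC pattern repeated throughout these hunks relies on preprocessor expansion followed by adjacent string-literal concatenation, so each help message is assembled into a single literal at compile time with no runtime formatting cost. A small standalone illustration:

#include <cstdio>

#define OP_NAME "conv_bwd_data"
#define OP_DESC "Convolution Backward Data"

int main()
{
    // After macro expansion this is several adjacent literals, which the
    // compiler fuses into one:
    // "arg1: tensor operation (conv_bwd_data: Convolution Backward Data)\n"
    printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
    return 0;
}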
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_conv_fwd_impl.hpp"
+#include "profiler/profile_conv_fwd_impl.hpp"
+#include "profiler_operation_registry.hpp"
 namespace {
@@ -24,11 +25,14 @@ enum struct ConvDataType
     INT8_INT8_INT8, // 3
 };
+#define OP_NAME "conv_fwd"
+#define OP_DESC "Convolution Forward"
 static void print_helper_msg()
 {
     std::cout
         // clang-format-off
-        << "arg1: tensor operation (conv_fwd: Convolution Forward)\n"
+        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
         << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                  1: Input fp16, Weight fp16, Output fp16\n"
        << "                  2: Input bf16, Weight bf16, Output bf16\n"
@@ -184,3 +188,5 @@ int profile_conv_fwd(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_conv_fwd_bias_relu_impl.hpp"
+#include "profiler/profile_conv_fwd_bias_relu_impl.hpp"
+#include "profiler_operation_registry.hpp"
 enum struct ConvDataType
 {
@@ -32,11 +33,14 @@ enum struct ConvOutputLayout
     NHWK, // 1
 };
+#define OP_NAME "conv_fwd_bias_relu"
+#define OP_DESC "Convolution Forward+Bias+ReLU"
 int profile_conv_fwd_bias_relu(int argc, char* argv[])
 {
     if(argc != 25)
     {
-        printf("arg1: tensor operation (conv_fwd_bias_relu: ForwardConvolution+Bias+ReLu)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -114,3 +118,5 @@ int profile_conv_fwd_bias_relu(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd_bias_relu);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_conv_fwd_bias_relu_add_impl.hpp"
+#include "profiler/profile_conv_fwd_bias_relu_add_impl.hpp"
+#include "profiler_operation_registry.hpp"
 enum struct ConvDataType
 {
@@ -32,12 +33,14 @@ enum struct ConvOutputLayout
     NHWK, // 1
 };
+#define OP_NAME "conv_fwd_bias_relu_add"
+#define OP_DESC "Convolution Forward+Bias+ReLU+Add"
 int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
 {
     if(argc != 25)
     {
-        printf(
-            "arg1: tensor operation (conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLu+Add)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -115,3 +118,5 @@ int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_conv_fwd_bias_relu_add);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_impl.hpp"
+#include "profiler/profile_gemm_impl.hpp"
+#include "profiler_operation_registry.hpp"
 enum struct GemmMatrixLayout
 {
@@ -24,9 +25,12 @@ enum struct GemmDataType
     INT8_INT8_INT8, // 3
 };
+#define OP_NAME "gemm"
+#define OP_DESC "GEMM"
 static void print_helper_msg()
 {
-    std::cout << "arg1: tensor operation (gemm: GEMM)\n"
+    std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
               << "arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n"
               << "arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n"
               << "                     1: A[m, k] * B[n, k] = C[m, n];\n"
@@ -184,3 +188,5 @@ int profile_gemm(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_add_add_fastgelu_impl.hpp"
+#include "profiler/profile_gemm_add_add_fastgelu_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "gemm_add_add_fastgelu"
+#define OP_DESC "GEMM+Add+Add+FastGeLU"
 int profile_gemm_add_add_fastgelu(int argc, char* argv[])
 {
@@ -29,7 +33,7 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
     if(argc != 16)
     {
         // clang-format off
-        printf("arg1: tensor operation (gemm_add_add_fastgelu: GEMM+Add+Add+FastGeLU)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
         printf("arg3: matrix layout (0: E[m, n] = FastGeLU(A[m, k] * B[k, n] + D0[m, n] + D1[m, n]);\n");
         printf("                     1: E[m, n] = FastGeLU(A[m, k] * B[n, k] + D0[m, n] + D1[m, n]);\n");
@@ -150,3 +154,5 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_add_fastgelu);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_bias_add_reduce_impl.hpp"
+#include "profiler/profile_gemm_bias_add_reduce_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "gemm_bias_add_reduce"
+#define OP_DESC "GEMM+Bias+Add+Reduce"
 int profile_gemm_bias_add_reduce(int argc, char* argv[])
 {
@@ -26,7 +30,7 @@ int profile_gemm_bias_add_reduce(int argc, char* argv[])
     if(!(argc == 14 || argc == 15))
     {
-        printf("arg1: tensor operation (gemm: GEMM+bias+add+Reduce)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -159,3 +163,5 @@ int profile_gemm_bias_add_reduce(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_bias_add_reduce);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_bilinear_impl.hpp"
+#include "profiler/profile_gemm_bilinear_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "gemm_bilinear"
+#define OP_DESC "GEMM+Bilinear"
 int profile_gemm_bilinear(int argc, char* argv[])
 {
@@ -29,7 +33,7 @@ int profile_gemm_bilinear(int argc, char* argv[])
     if(argc != 17)
     {
         // clang-format off
-        printf("arg1: tensor operation (gemm_bilinear: GEMM+Bilinear)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
         printf("arg3: matrix layout (0: E[m, n] = alpha * A[m, k] * B[k, n] + beta * D[m, n];\n");
         printf("                     1: E[m, n] = alpha * A[m, k] * B[n, k] + beta * D[m, n];\n");
@@ -144,3 +148,5 @@ int profile_gemm_bilinear(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_bilinear);
@@ -6,7 +6,11 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_reduce_impl.hpp"
+#include "profiler/profile_gemm_reduce_impl.hpp"
+#include "profiler_operation_registry.hpp"
+
+#define OP_NAME "gemm_reduce"
+#define OP_DESC "GEMM+Reduce"
 int profile_gemm_reduce(int argc, char* argv[])
 {
@@ -26,7 +30,7 @@ int profile_gemm_reduce(int argc, char* argv[])
     if(!(argc == 14 || argc == 15))
     {
-        printf("arg1: tensor operation (gemm: GEMM+Reduce)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -146,3 +150,5 @@ int profile_gemm_reduce(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_reduce);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_gemm_splitk_impl.hpp"
+#include "profiler/profile_gemm_splitk_impl.hpp"
+#include "profiler_operation_registry.hpp"
 enum struct GemmMatrixLayout
 {
@@ -24,11 +25,14 @@ enum struct GemmDataType
     INT8_INT8_INT8, // 3
 };
+#define OP_NAME "gemm_splitk"
+#define OP_DESC "Split-K GEMM"
 int profile_gemm_splitk(int argc, char* argv[])
 {
     if(argc != 15)
     {
-        printf("arg1: tensor operation (gemm_splitk: Split-K GEMM)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -146,3 +150,5 @@ int profile_gemm_splitk(int argc, char* argv[])
         return 1;
     }
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_splitk);
@@ -6,7 +6,8 @@
 #include <iostream>
 #include <numeric>
-#include "profiler/include/profile_grouped_conv_bwd_weight_impl.hpp"
+#include "profiler/profile_grouped_conv_bwd_weight_impl.hpp"
+#include "profiler_operation_registry.hpp"
 namespace {
@@ -23,9 +24,12 @@ enum struct ConvDataType
     BF16_F32_BF16, // 2
 };
+#define OP_NAME "grouped_conv_bwd_weight"
+#define OP_DESC "Grouped Convolution Backward Weight"
 static void print_helper_msg()
 {
-    std::cout << "arg1: tensor operation (conv_bwd_weight: Convolution Backward Weight\n"
+    std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
               << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
               << "                 1: Input fp16, Weight fp16, Output fp16\n"
               << "                 2: Input bf16, Weight fp32, Output bf16)\n"
@@ -174,3 +178,5 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_conv_bwd_weight);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_grouped_conv_fwd_impl.hpp"
+#include "profiler/profile_grouped_conv_fwd_impl.hpp"
+#include "profiler_operation_registry.hpp"
 namespace {
@@ -24,11 +25,14 @@ enum struct ConvDataType
     INT8_INT8_INT8, // 3
 };
+#define OP_NAME "grouped_conv_fwd"
+#define OP_DESC "Grouped Convolution Forward"
 static void print_helper_msg()
 {
     std::cout
         // clang-format off
-        << "arg1: tensor operation (grouped_conv_fwd: Grouped Convolution Forward)\n"
+        << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
         << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                  1: Input fp16, Weight fp16, Output fp16\n"
        << "                  2: Input bf16, Weight bf16, Output bf16\n"
@@ -252,3 +256,5 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
     return 1;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_conv_fwd);
@@ -6,7 +6,8 @@
 #include <initializer_list>
 #include <cstdlib>
-#include "profiler/include/profile_grouped_gemm_impl.hpp"
+#include "profiler/profile_grouped_gemm_impl.hpp"
+#include "profiler_operation_registry.hpp"
 enum struct GemmMatrixLayout
 {
@@ -44,11 +45,14 @@ std::vector<int> argToIntArray(char* input)
     return out;
 }
+#define OP_NAME "grouped_gemm"
+#define OP_DESC "Grouped GEMM"
 int profile_grouped_gemm(int argc, char* argv[])
 {
     if(!(argc == 14))
     {
-        printf("arg1: tensor operation (grouped_gemm: Grouped GEMM)\n");
+        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -161,3 +165,5 @@ int profile_grouped_gemm(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_grouped_gemm);
@@ -5,8 +5,9 @@
 #include <vector>
 #include <unordered_map>
-#include "profiler/include/data_type_enum.hpp"
-#include "profiler/include/profile_groupnorm_impl.hpp"
+#include "profiler/data_type_enum.hpp"
+#include "profiler/profile_groupnorm_impl.hpp"
+#include "profiler_operation_registry.hpp"
 using ck::index_t;
@@ -43,9 +44,12 @@ struct GroupnormArgParser
     }
 };
+#define OP_NAME "groupnorm"
+#define OP_DESC "Group Normalization"
 void print_help_groupnorm()
 {
-    std::cout << "arg1: tensor operation (groupnorm: Group normalization)\n"
+    std::cout << "arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n"
              << "arg2: data type (0: fp16; 1: fp32)\n"
              << "arg3: verification (0: no; 1: yes)\n"
              << "arg4: initialization (0: no init; 1: integer value; 2: decimal value)\n"
@@ -104,3 +108,5 @@ int profile_groupnorm(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_groupnorm);
@@ -5,8 +5,9 @@
 #include <vector>
 #include <unordered_map>
-#include "profiler/include/data_type_enum.hpp"
-#include "profiler/include/profile_layernorm_impl.hpp"
+#include "profiler/data_type_enum.hpp"
+#include "profiler/profile_layernorm_impl.hpp"
+#include "profiler_operation_registry.hpp"
 using ck::index_t;
@@ -96,3 +97,5 @@ int profile_layernorm(int argc, char* argv[])
     return 0;
 }
+
+REGISTER_PROFILER_OPERATION("layernorm", "Layer Normalization", profile_layernorm);
@@ -13,8 +13,9 @@
 #include "ck/library/utility/host_common_util.hpp"
-#include "profiler/include/profile_reduce_impl.hpp"
-#include "profiler/include/data_type_enum.hpp"
+#include "profiler/profile_reduce_impl.hpp"
+#include "profiler/data_type_enum.hpp"
+#include "profiler_operation_registry.hpp"
 using namespace std;
@@ -429,3 +430,5 @@ int profile_reduce(int argc, char* argv[])
     return (0);
 };
+
+REGISTER_PROFILER_OPERATION("reduce", "Reduce", profile_reduce);
@@ -5,7 +5,8 @@
 #include <vector>
 #include <unordered_map>
-#include "profiler/include/profile_softmax_impl.hpp"
+#include "profiler/profile_softmax_impl.hpp"
+#include "profiler_operation_registry.hpp"
 using ck::index_t;
 using ck::profiler::SoftmaxDataType;
@@ -164,3 +165,5 @@ int profile_softmax(int argc, char* argv[])
 //     profile_normalization(argc, argv);
 //     return 0;
 // }
+
+REGISTER_PROFILER_OPERATION("softmax", "Softmax", profile_softmax);
@@ -1,56 +1,15 @@
 // SPDX-License-Identifier: MIT
 // Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 
-#include <cstring>
-
-int profile_gemm(int, char*[]);
-int profile_gemm_splitk(int, char*[]);
-int profile_gemm_bilinear(int, char*[]);
-int profile_gemm_add_add_fastgelu(int, char*[]);
-int profile_gemm_reduce(int, char*[]);
-int profile_gemm_bias_add_reduce(int, char*[]);
-int profile_batched_gemm(int, char*[]);
-int profile_batched_gemm_gemm(int, char*[]);
-int profile_batched_gemm_add_relu_gemm_add(int, char*[]);
-int profile_batched_gemm_reduce(int, char*[]);
-int profile_grouped_gemm(int, char*[]);
-int profile_conv_fwd(int, char*[]);
-int profile_conv_fwd_bias_relu(int, char*[]);
-int profile_conv_fwd_bias_relu_add(int, char*[]);
-int profile_conv_bwd_data(int, char*[]);
-int profile_grouped_conv_fwd(int, char*[]);
-int profile_grouped_conv_bwd_weight(int, char*[]);
-int profile_softmax(int, char*[]);
-int profile_layernorm(int, char*[]);
-int profile_groupnorm(int, char*[]);
-int profile_reduce(int, char*[]);
-int profile_batchnorm_forward(int, char*[]);
+#include <cstdlib>
+#include <iostream>
+
+#include "profiler_operation_registry.hpp"
 
 static void print_helper_message()
 {
-    // clang-format off
-    printf("arg1: tensor operation (gemm: GEMM\n"
-           "                        gemm_splitk: Split-K GEMM\n"
-           "                        gemm_bilinear: GEMM+Bilinear\n"
-           "                        gemm_add_add_fastgelu: GEMM+Add+Add+FastGeLU\n"
-           "                        gemm_reduce: GEMM+Reduce\n"
-           "                        gemm_bias_add_reduce: GEMM+Bias+Add+Reduce\n"
-           "                        batched_gemm: Batched GEMM\n"
-           "                        batched_gemm_gemm: Batched+GEMM+GEMM\n"
-           "                        batched_gemm_add_relu_gemm_add: Batched+GEMM+bias+gelu+GEMM+bias\n"
-           "                        batched_gemm_reduce: Batched GEMM+Reduce\n"
-           "                        grouped_gemm: Grouped GEMM\n"
-           "                        conv_fwd: Convolution Forward\n"
-           "                        conv_fwd_bias_relu: ForwardConvolution+Bias+ReLU\n"
-           "                        conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLU+Add\n"
-           "                        conv_bwd_data: Convolution Backward Data\n"
-           "                        grouped_conv_fwd: Grouped Convolution Forward\n"
-           "                        grouped_conv_bwd_weight: Grouped Convolution Backward Weight\n"
-           "                        softmax: Softmax\n"
-           "                        reduce: Reduce\n"
-           "                        bnorm_fwd: Batchnorm forward\n");
-    // clang-format on
+    std::cout << "arg1: tensor operation " << ProfilerOperationRegistry::GetInstance() << std::endl;
 }
 
 int main(int argc, char* argv[])
 {
@@ -57,101 +16,15 @@ int main(int argc, char* argv[])
     if(argc == 1)
     {
         print_helper_message();
+
+        return 0;
     }
-    else if(strcmp(argv[1], "gemm") == 0)
-    {
-        return profile_gemm(argc, argv);
-    }
-    else if(strcmp(argv[1], "gemm_splitk") == 0)
-    {
-        return profile_gemm_splitk(argc, argv);
-    }
-    else if(strcmp(argv[1], "gemm_bilinear") == 0)
-    {
-        return profile_gemm_bilinear(argc, argv);
-    }
-    else if(strcmp(argv[1], "gemm_add_add_fastgelu") == 0)
-    {
-        return profile_gemm_add_add_fastgelu(argc, argv);
-    }
-    else if(strcmp(argv[1], "gemm_reduce") == 0)
-    {
-        return profile_gemm_reduce(argc, argv);
-    }
-    else if(strcmp(argv[1], "gemm_bias_add_reduce") == 0)
-    {
-        return profile_gemm_bias_add_reduce(argc, argv);
-    }
-    else if(strcmp(argv[1], "batched_gemm") == 0)
-    {
-        return profile_batched_gemm(argc, argv);
-    }
-    else if(strcmp(argv[1], "batched_gemm_gemm") == 0)
-    {
-        return profile_batched_gemm_gemm(argc, argv);
-    }
-    else if(strcmp(argv[1], "batched_gemm_add_relu_gemm_add") == 0)
-    {
-        return profile_batched_gemm_add_relu_gemm_add(argc, argv);
-    }
-    else if(strcmp(argv[1], "batched_gemm_reduce") == 0)
-    {
-        return profile_batched_gemm_reduce(argc, argv);
-    }
-    else if(strcmp(argv[1], "grouped_gemm") == 0)
-    {
-        return profile_grouped_gemm(argc, argv);
-    }
-    else if(strcmp(argv[1], "conv_fwd") == 0)
-    {
-        return profile_conv_fwd(argc, argv);
-    }
-    else if(strcmp(argv[1], "conv_fwd_bias_relu") == 0)
-    {
-        return profile_conv_fwd_bias_relu(argc, argv);
-    }
-    else if(strcmp(argv[1], "conv_fwd_bias_relu_add") == 0)
-    {
-        return profile_conv_fwd_bias_relu_add(argc, argv);
-    }
-    else if(strcmp(argv[1], "conv_bwd_data") == 0)
-    {
-        return profile_conv_bwd_data(argc, argv);
-    }
-    else if(strcmp(argv[1], "grouped_conv_fwd") == 0)
-    {
-        return profile_grouped_conv_fwd(argc, argv);
-    }
-    else if(strcmp(argv[1], "conv_bwd_weight") == 0)
-    {
-        return profile_grouped_conv_bwd_weight(argc, argv);
-    }
-    else if(strcmp(argv[1], "reduce") == 0)
-    {
-        return profile_reduce(argc, argv);
-    }
-    else if(strcmp(argv[1], "softmax") == 0)
-    {
-        return profile_softmax(argc, argv);
-    }
-    else if(strcmp(argv[1], "layernorm") == 0)
-    {
-        return profile_layernorm(argc, argv);
-    }
-    else if(strcmp(argv[1], "groupnorm") == 0)
-    {
-        return profile_groupnorm(argc, argv);
-    }
-    else if(strcmp(argv[1], "bnorm_fwd") == 0)
-    {
-        return profile_batchnorm_forward(argc, argv);
-    }
+    else if(const auto operation = ProfilerOperationRegistry::GetInstance().Get(argv[1]);
+            operation.has_value())
+    {
+        return (*operation)(argc, argv);
+    }
     else
     {
-        print_helper_message();
-
-        return 0;
+        std::cerr << "cannot find operation: " << argv[1] << std::endl;
+
+        return EXIT_FAILURE;
    }
 }
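
The rewritten main() uses the C++17 if-statement-with-initializer form, so the optional returned by Get() is scoped to the whole else-if chain. A self-contained sketch of the same dispatch shape, assuming a stand-in map and find_op() rather than the CK registry:

#include <cstdlib>
#include <iostream>
#include <map>
#include <optional>
#include <string>

using Operation = int (*)(int, char*[]);

static int demo_op(int, char*[]) { return 0; }

static std::optional<Operation> find_op(const std::string& name)
{
    static const std::map<std::string, Operation> ops{{"demo", demo_op}};
    const auto it = ops.find(name);
    return it == ops.end() ? std::nullopt : std::optional<Operation>(it->second);
}

int main(int argc, char* argv[])
{
    if(argc == 1)
    {
        std::cout << "usage: profiler <operation> ..." << std::endl;
        return 0;
    }
    // C++17: the initializer runs first, then the condition is tested;
    // "operation" is visible in both this branch and the else branch.
    else if(const auto operation = find_op(argv[1]); operation.has_value())
    {
        return (*operation)(argc, argv);
    }
    else
    {
        std::cerr << "cannot find operation: " << argv[1] << std::endl;
        return EXIT_FAILURE;
    }
}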