Commit d5e056c7 authored by Po-Yen, Chen

Add description for profiler operations

parent 8116d2b3
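
The diff below is mechanical: every REGISTER_PROFILER_OPERATION call site gains a human-readable description string between the operation name and the profiling entry point, and the calls now end with a semicolon. As a hypothetical illustration only (the macro's real definition lives elsewhere in the repository and may differ), a registration macro of this shape could record each (name, description, function) triple in a static registry at program start-up:

// Hypothetical sketch, not the repository's actual implementation.
// It shows how (name, description, function) triples could be collected
// so the profiler driver can look operations up by name.
#include <functional>
#include <map>
#include <string>
#include <utility>

struct ProfilerOperation
{
    std::string description;                     // human-readable text (the new argument)
    std::function<int(int, char*[])> profile_fn; // entry point, e.g. profile_gemm
};

// name -> operation table consulted by the profiler driver
inline std::map<std::string, ProfilerOperation>& profiler_operation_registry()
{
    static std::map<std::string, ProfilerOperation> registry;
    return registry;
}

struct ProfilerOperationRegistrar
{
    ProfilerOperationRegistrar(const std::string& name,
                               const std::string& description,
                               std::function<int(int, char*[])> fn)
    {
        profiler_operation_registry()[name] = ProfilerOperation{description, std::move(fn)};
    }
};

// Each call site expands to one static registrar object; the trailing ';'
// added at the call sites below terminates this variable definition.
#define REGISTER_PROFILER_OPERATION(name, description, fn) \
    static ProfilerOperationRegistrar registrar_##fn(name, description, fn)
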
@@ -197,4 +197,4 @@ int profile_batched_gemm(int argc, char* argv[])
 }
 }
-REGISTER_PROFILER_OPERATION("batched_gemm", profile_batched_gemm)
+REGISTER_PROFILER_OPERATION("batched_gemm", "Batched GEMM", profile_batched_gemm);
@@ -111,7 +111,7 @@ int profile_batched_gemm_add_relu_gemm_add(int argc, char* argv[])
     else
     {
         printf("arg1: tensor operation (batched_gemm_add_relu_gemm_add: "
-               "Batched_GEMM+Add+Relu+Gemm+Add)\n");
+               "Batched GEMM+Add+Relu+GEMM+Add)\n");
         printf("arg2: data type (1: fp16)\n");
         printf("arg3: matrix layout (0: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[n, o] + D1[m, o] "
                "= E1[m, o]; 1: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[o, n] + D1[m, o] = "
@@ -209,4 +209,6 @@ int profile_batched_gemm_add_relu_gemm_add(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("batched_gemm_add_relu_gemm_add", profile_batched_gemm_add_relu_gemm_add)
+REGISTER_PROFILER_OPERATION("batched_gemm_add_relu_gemm_add",
+                            "Batched GEMM+Add+Relu+GEMM+Add",
+                            profile_batched_gemm_add_relu_gemm_add);
@@ -102,7 +102,7 @@ int profile_batched_gemm_gemm(int argc, char* argv[])
     }
     else
     {
-        printf("arg1: tensor operation (batched_gemm_gemm: Batched_GEMM+Gemm)\n");
+        printf("arg1: tensor operation (batched_gemm_gemm: Batched GEMM+GEMM)\n");
         printf("arg2: data type (1: fp16)\n");
         printf("arg3: matrix layout (0: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[n, o] + D1[m, o] "
                "= E1[m, o]; 1: Relu(A0[m, k] * B0[n, k] + D0[m, n]) * B1[o, n] + D1[m, o] = E1[m, "
@@ -181,4 +181,4 @@ int profile_batched_gemm_gemm(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("batched_gemm_gemm", profile_batched_gemm_gemm)
+REGISTER_PROFILER_OPERATION("batched_gemm_gemm", "Batched GEMM+GEMM", profile_batched_gemm_gemm);
@@ -27,7 +27,7 @@ int profile_batched_gemm_reduce(int argc, char* argv[])
     if(argc != 15)
     {
-        printf("arg1: tensor operation (batched_gemm_reduce: BatchedGEMM+Reduce)\n");
+        printf("arg1: tensor operation (batched_gemm_reduce: Batched GEMM+Reduce)\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -153,4 +153,6 @@ int profile_batched_gemm_reduce(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("batched_gemm_reduce", profile_batched_gemm_reduce)
+REGISTER_PROFILER_OPERATION("batched_gemm_reduce",
+                            "Batched GEMM+Reduce",
+                            profile_batched_gemm_reduce);
@@ -184,4 +184,4 @@ int profile_conv_bwd_data(int argc, char* argv[])
     return 1;
 }
-REGISTER_PROFILER_OPERATION("conv_bwd_data", profile_conv_bwd_data)
+REGISTER_PROFILER_OPERATION("conv_bwd_data", "Convolution Backward Data", profile_conv_bwd_data);
@@ -186,4 +186,4 @@ int profile_conv_fwd(int argc, char* argv[])
     return 1;
 }
-REGISTER_PROFILER_OPERATION("conv_fwd", profile_conv_fwd)
+REGISTER_PROFILER_OPERATION("conv_fwd", "Convolution Forward", profile_conv_fwd);
@@ -37,7 +37,7 @@ int profile_conv_fwd_bias_relu(int argc, char* argv[])
 {
     if(argc != 25)
     {
-        printf("arg1: tensor operation (conv_fwd_bias_relu: ForwardConvolution+Bias+ReLu)\n");
+        printf("arg1: tensor operation (conv_fwd_bias_relu: Convolution Forward+Bias+ReLU)\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -116,4 +116,6 @@ int profile_conv_fwd_bias_relu(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("conv_fwd_bias_relu", profile_conv_fwd_bias_relu)
+REGISTER_PROFILER_OPERATION("conv_fwd_bias_relu",
+                            "Convolution Forward+Bias+ReLU",
+                            profile_conv_fwd_bias_relu);
@@ -38,7 +38,7 @@ int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
     if(argc != 25)
     {
         printf(
-            "arg1: tensor operation (conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLu+Add)\n");
+            "arg1: tensor operation (conv_fwd_bias_relu_add: Convolution Forward+Bias+ReLU+Add)\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
         printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
@@ -117,4 +117,6 @@ int profile_conv_fwd_bias_relu_add(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("conv_fwd_bias_relu_add", profile_conv_fwd_bias_relu_add)
+REGISTER_PROFILER_OPERATION("conv_fwd_bias_relu_add",
+                            "Convolution Forward+Bias+ReLU+Add",
+                            profile_conv_fwd_bias_relu_add);
@@ -186,4 +186,4 @@ int profile_gemm(int argc, char* argv[])
     }
 }
-REGISTER_PROFILER_OPERATION("gemm", profile_gemm)
+REGISTER_PROFILER_OPERATION("gemm", "GEMM", profile_gemm);
@@ -152,4 +152,6 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
     }
 }
-REGISTER_PROFILER_OPERATION("gemm_add_add_fastgelu", profile_gemm_add_add_fastgelu)
+REGISTER_PROFILER_OPERATION("gemm_add_add_fastgelu",
+                            "GEMM+Add+Add+FastGeLU",
+                            profile_gemm_add_add_fastgelu);
@@ -27,7 +27,7 @@ int profile_gemm_bias_add_reduce(int argc, char* argv[])
     if(!(argc == 14 || argc == 15))
     {
-        printf("arg1: tensor operation (gemm: GEMM+bias+add+Reduce)\n");
+        printf("arg1: tensor operation (gemm_bias_add_reduce: GEMM+Bias+Add+Reduce)\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -161,4 +161,6 @@ int profile_gemm_bias_add_reduce(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("gemm_bias_add_reduce", profile_gemm_bias_add_reduce)
+REGISTER_PROFILER_OPERATION("gemm_bias_add_reduce",
+                            "GEMM+Bias+Add+Reduce",
+                            profile_gemm_bias_add_reduce);
@@ -146,4 +146,4 @@ int profile_gemm_bilinear(int argc, char* argv[])
     }
 }
-REGISTER_PROFILER_OPERATION("gemm_bilinear", profile_gemm_bilinear)
+REGISTER_PROFILER_OPERATION("gemm_bilinear", "GEMM+Bilinear", profile_gemm_bilinear);
@@ -27,7 +27,7 @@ int profile_gemm_reduce(int argc, char* argv[])
     if(!(argc == 14 || argc == 15))
     {
-        printf("arg1: tensor operation (gemm: GEMM+Reduce)\n");
+        printf("arg1: tensor operation (gemm_reduce: GEMM+Reduce)\n");
         printf("arg2: data type (0: fp32; 1: fp16)\n");
         printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
         printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
@@ -148,4 +148,4 @@ int profile_gemm_reduce(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("gemm_reduce", profile_gemm_reduce)
+REGISTER_PROFILER_OPERATION("gemm_reduce", "GEMM+Reduce", profile_gemm_reduce);
@@ -148,4 +148,4 @@ int profile_gemm_splitk(int argc, char* argv[])
     }
 }
-REGISTER_PROFILER_OPERATION("gemm_splitk", profile_gemm_splitk)
+REGISTER_PROFILER_OPERATION("gemm_splitk", "Split-K GEMM", profile_gemm_splitk);
@@ -26,20 +26,21 @@ enum struct ConvDataType
 static void print_helper_msg()
 {
-    std::cout << "arg1: tensor operation (conv_bwd_weight: Convolution Backward Weight\n"
-              << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
-              << "                 1: Input fp16, Weight fp16, Output fp16\n"
-              << "                 2: Input bf16, Weight fp32, Output bf16)\n"
-              << "arg3: tensor layout (0: Input[G, N, C, Hi, Wi], Weight[G, K, C, Y, X], Output[G, "
-                 "N, K, Ho, Wo]\n"
-              << "                     1: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, "
-                 "N, Ho, Wo, K]\n"
-              << "arg4: verification (0: no, 1: yes)\n"
-              << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
-              << "arg6: print tensor value (0: no; 1: yes)\n"
-              << "arg7: time kernel (0: no, 1: yes)\n"
-              << ck::utils::conv::get_conv_param_parser_helper_msg() << " SplitK\n"
-              << std::endl;
+    std::cout
+        << "arg1: tensor operation (grouped_conv_bwd_weight: Grouped Convolution Backward Weight\n"
+        << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
+        << "                 1: Input fp16, Weight fp16, Output fp16\n"
+        << "                 2: Input bf16, Weight fp32, Output bf16)\n"
+        << "arg3: tensor layout (0: Input[G, N, C, Hi, Wi], Weight[G, K, C, Y, X], Output[G, "
+           "N, K, Ho, Wo]\n"
+        << "                     1: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, "
+           "N, Ho, Wo, K]\n"
+        << "arg4: verification (0: no, 1: yes)\n"
+        << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
+        << "arg6: print tensor value (0: no; 1: yes)\n"
+        << "arg7: time kernel (0: no, 1: yes)\n"
+        << ck::utils::conv::get_conv_param_parser_helper_msg() << " SplitK\n"
+        << std::endl;
 }
 } // namespace
@@ -176,4 +177,6 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
     return 1;
 }
-REGISTER_PROFILER_OPERATION("grouped_conv_bwd_weight", profile_grouped_conv_bwd_weight)
+REGISTER_PROFILER_OPERATION("grouped_conv_bwd_weight",
+                            "Grouped Convolution Backward Weight",
+                            profile_grouped_conv_bwd_weight);
@@ -254,4 +254,6 @@ int profile_grouped_conv_fwd(int argc, char* argv[])
     return 1;
 }
-REGISTER_PROFILER_OPERATION("grouped_conv_fwd", profile_grouped_conv_fwd)
+REGISTER_PROFILER_OPERATION("grouped_conv_fwd",
+                            "Grouped Convolution Forward",
+                            profile_grouped_conv_fwd);
@@ -163,4 +163,4 @@ int profile_grouped_gemm(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("grouped_gemm", profile_grouped_gemm)
+REGISTER_PROFILER_OPERATION("grouped_gemm", "Grouped GEMM", profile_grouped_gemm);
@@ -46,7 +46,7 @@ struct GroupnormArgParser
 void print_help_groupnorm()
 {
-    std::cout << "arg1: tensor operation (groupnorm: Group normalization)\n"
+    std::cout << "arg1: tensor operation (groupnorm: Group Normalization)\n"
               << "arg2: data type (0: fp16; 1: fp32)\n"
               << "arg3: verification (0: no; 1: yes)\n"
               << "arg4: initialization (0: no init; 1: integer value; 2: decimal value)\n"
@@ -106,4 +106,4 @@ int profile_groupnorm(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("groupnorm", profile_groupnorm)
+REGISTER_PROFILER_OPERATION("groupnorm", "Group Normalization", profile_groupnorm);
@@ -98,4 +98,4 @@ int profile_layernorm(int argc, char* argv[])
     return 0;
 }
-REGISTER_PROFILER_OPERATION("layernorm", profile_layernorm)
+REGISTER_PROFILER_OPERATION("layernorm", "Layer Normalization", profile_layernorm);
@@ -431,4 +431,4 @@ int profile_reduce(int argc, char* argv[])
     return (0);
 };
-REGISTER_PROFILER_OPERATION("reduce", profile_reduce)
+REGISTER_PROFILER_OPERATION("reduce", "Reduce", profile_reduce);