gaoqiong / composable_kernel_ROCM — Commit 29dcb956 (Unverified)

Authored Feb 08, 2024 by Illia Silin, committed by GitHub on Feb 08, 2024

Merge pull request #33 from ROCm/lwpck-1292

Merge from the public repo.

Parents: 29deceb6, cbcc844e
Changes: 393 files in total; this page shows 20 changed files with 1125 additions and 320 deletions (+1125, −320).
Changed files on this page:

    profiler/include/profiler/profile_transpose_impl.hpp   +4    −8
    profiler/src/CMakeLists.txt                             +15   −1
    profiler/src/profile_gemm.cpp                           +23   −5
    profiler/src/profile_gemm_add.cpp                       +139  −0
    profiler/src/profile_gemm_add_fastgelu.cpp              +15   −2
    profiler/src/profile_gemm_add_relu.cpp                  +139  −0
    profiler/src/profile_gemm_add_silu.cpp                  +139  −0
    profiler/src/profile_gemm_splitk.cpp                    +15   −2
    profiler/src/profile_grouped_gemm.cpp                   +31   −7
    profiler/src/profile_groupnorm_bwd_data.cpp             +104  −0
    profiler/src/profile_groupnorm_bwd_gamma_beta.cpp       +104  −0
    profiler/src/profile_groupnorm_fwd.cpp                  +1    −1
    profiler/src/profile_layernorm_bwd_data.cpp             +112  −0
    profiler/src/profile_layernorm_bwd_gamma_beta.cpp       +112  −0
    profiler/src/profile_layernorm_fwd.cpp                  +2    −2
    profiler/src/profile_transpose.cpp                      +112  −0
    script/clang-format-overwrite.sh                        +1    −1
    script/parse_perf_data.py                               +0    −290
    script/process_perf_data.py                             +5    −1
    script/profile_mixed_gemm.sh                            +52   −0
profiler/include/profiler/profile_transpose_impl.hpp

```diff
@@ -25,7 +25,7 @@ namespace ck {
 namespace profiler {
 
 template <typename HostTensorA, typename HostTensorB, typename Functor>
-void host_elementwise4D(HostTensorB& B_nchwd, const HostTensorA& A_ncdhw, Functor functor)
+void host_elementwise4D(HostTensorB& B_ndhwc, const HostTensorA& A_ncdhw, Functor functor)
 {
     for(std::size_t n = 0; n < A_ncdhw.mDesc.GetLengths()[0]; ++n)
         for(std::size_t c = 0; c < A_ncdhw.mDesc.GetLengths()[1]; ++c)
@@ -34,7 +34,7 @@ void host_elementwise4D(HostTensorB& B_nchwd, const HostTensorA& A_ncdhw, Functo
                 for(std::size_t w = 0; w < A_ncdhw.mDesc.GetLengths()[4]; ++w)
                 {
                     auto a_val = A_ncdhw(n, c, d, h, w);
-                    functor(B_nchwd(n, c, h, w, d), a_val);
+                    functor(B_ndhwc(n, d, h, w, c), a_val);
                 }
 }
@@ -77,8 +77,6 @@ bool profile_transpose_impl(int do_verification,
     using ElementOp = ck::tensor_operation::element_wise::PassThrough;
-    // const auto element_op = ElementOp{};
 
     DeviceMem a_device_buf(sizeof(ADataType) * a.mDesc.GetElementSpaceSize());
     DeviceMem b_device_buf(sizeof(BDataType) * b.mDesc.GetElementSpaceSize());
@@ -118,6 +116,7 @@ bool profile_transpose_impl(int do_verification,
             // re-init C to zero before profiling next kernel
             b_device_buf.SetZero();
 
+            // run for verification
             invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false});
 
             if(do_verification)
@@ -136,6 +135,7 @@ bool profile_transpose_impl(int do_verification,
         std::string op_name = op_ptr->GetTypeString();
 
+        // run for timing purposes
         float ave_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
@@ -153,10 +153,6 @@ bool profile_transpose_impl(int do_verification,
         std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
                   << gb_per_sec << " GB/s, " << op_name << std::endl;
 
-        // pass = pass & ck::utils::check_err(b_device_result, b_host_result);
-        pass &= ck::utils::check_err(
-            b.mData, host_b.mData, "Error: Incorrect results b", 1e-3, 1e-3);
 
         if(tflops > best_tflops)
         {
             best_op_name = op_name;
```
profiler/src/CMakeLists.txt

```diff
@@ -16,7 +16,11 @@ set(PROFILER_SOURCES
     profile_grouped_conv_fwd.cpp
     profile_grouped_conv_bwd_weight.cpp
     profile_reduce.cpp
+    profile_groupnorm_bwd_data.cpp
     profile_groupnorm_fwd.cpp
+    profile_layernorm_bwd_data.cpp
+    profile_layernorm_bwd_gamma_beta.cpp
+    profile_groupnorm_bwd_gamma_beta.cpp
     profile_layernorm_fwd.cpp
     profile_max_pool3d_fwd.cpp
     profile_avg_pool3d_bwd.cpp
@@ -27,6 +31,7 @@ set(PROFILER_SOURCES
     profile_batchnorm_infer.cpp
     profile_grouped_conv_bwd_data.cpp
     profile_conv_tensor_rearrange.cpp
+    profile_transpose.cpp
 )
 if(DL_KERNELS)
@@ -38,7 +43,10 @@ if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
     list(APPEND PROFILER_SOURCES profile_gemm_fastgelu.cpp)
     list(APPEND PROFILER_SOURCES profile_gemm_streamk.cpp)
     list(APPEND PROFILER_SOURCES profile_gemm_bilinear.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_add.cpp)
     list(APPEND PROFILER_SOURCES profile_gemm_add_fastgelu.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_add_relu.cpp)
+    list(APPEND PROFILER_SOURCES profile_gemm_add_silu.cpp)
     list(APPEND PROFILER_SOURCES profile_gemm_add_add_fastgelu.cpp)
     list(APPEND PROFILER_SOURCES profile_gemm_add_relu_add_layernorm.cpp)
     list(APPEND PROFILER_SOURCES profile_batched_gemm_add_relu_gemm_add.cpp)
@@ -56,7 +64,7 @@ set(PROFILER_EXECUTABLE ckProfiler)
 add_executable(${PROFILER_EXECUTABLE} ${PROFILER_SOURCES})
 target_compile_options(${PROFILER_EXECUTABLE} PRIVATE -Wno-global-constructors)
-target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE utility)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE utility getopt::getopt)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_splitk_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_multiply_instance)
@@ -78,6 +86,8 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_w
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_conv2d_fwd_bias_relu_add_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_fwd_instance)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_bwd_data_instance)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_normalization_bwd_gamma_beta_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_softmax_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_reduce_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batchnorm_instance)
@@ -88,6 +98,7 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv2d_bwd_d
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_bwd_data_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_image_to_column_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_column_to_image_instance)
+target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_transpose_instance)
 if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_bilinear_instance)
@@ -101,7 +112,10 @@ if(DL_KERNELS)
 endif()
 if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_fastgelu_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_instance)
+    target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_silu_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_relu_add_layernorm_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_add_add_fastgelu_instance)
```
profiler/src/profile_gemm.cpp

```diff
@@ -42,12 +42,15 @@ static void print_helper_msg()
        << "arg6: print tensor value (0: no; 1: yes)\n"
        << "arg7: time kernel (0: no, 1: yes)\n"
        << "arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n"
+       << "optional:\n"
+       << "arg14: number of warm-up cycles (default 1)\n"
+       << "arg15: number of iterations (default 10)\n"
        << std::endl;
 }
 
 int profile_gemm(int argc, char* argv[])
 {
-    if(argc != 14)
+    if(argc != 14 && argc != 16)
     {
         print_helper_msg();
         exit(1);
@@ -68,6 +71,13 @@ int profile_gemm(int argc, char* argv[])
     const int StrideB = std::stoi(argv[12]);
     const int StrideC = std::stoi(argv[13]);
 
+    int n_warmup = 1;
+    int n_iter   = 10;
+    if(argc == 16)
+    {
+        n_warmup = std::stoi(argv[14]);
+        n_iter   = std::stoi(argv[15]);
+    }
 
     using F32 = float;
     using F16 = ck::half_t;
 #ifdef CK_ENABLE_BF16
@@ -120,13 +130,21 @@ int profile_gemm(int argc, char* argv[])
             K,
             (StrideA < 0) ? DefaultStrideA : StrideA,
             (StrideB < 0) ? DefaultStrideB : StrideB,
-            (StrideC < 0) ? DefaultStrideC : StrideC);
+            (StrideC < 0) ? DefaultStrideC : StrideC,
+            n_warmup,
+            n_iter);
 
         return pass ? 0 : 1;
     };
 
-    if(false)
-        ;
+    if(data_type != GemmDataType::F32_F32_F32 && data_type != GemmDataType::F16_F16_F16 &&
+       data_type != GemmDataType::BF16_BF16_BF16 && data_type != GemmDataType::INT8_INT8_INT8 &&
+       data_type != GemmDataType::F8_F8_F8)
+    {
+        // dummy clause before the else clauses for different data types
+        std::cout << "Gemm: this data_type is not implemented" << std::endl;
+        return 1;
+    }
 #ifdef CK_ENABLE_FP32
     else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
     {
@@ -219,7 +237,7 @@ int profile_gemm(int argc, char* argv[])
 #endif
     else
     {
-        std::cout << "this data_type & layout is not implemented" << std::endl;
+        std::cout << "Gemm: this data_type & layout is not implemented" << std::endl;
 
         return 1;
     }
```
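With the optional warm-up and iteration arguments in place, a gemm profiling run can control its own timing loop from the command line. A minimal invocation sketch, assuming the usual ckProfiler argument order from the helper message; the problem sizes and the trailing `5 50` warm-up/iteration counts are illustrative values, not taken from this commit:

```bash
# gemm: data type, layout, verify, init, log, time kernel,
# M N K StrideA StrideB StrideC, then optional n_warmup n_iter (argc == 16)
../build/bin/ckProfiler gemm 1 0 1 2 0 1 3840 4096 4096 -1 -1 -1 5 50
```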
profiler/src/profile_gemm_add.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_gemm_add_impl.hpp"
#include "profiler_operation_registry.hpp"

#define OP_NAME "gemm_add"
#define OP_DESC "GEMM+Add"

using INT8 = int8_t;
using BF16 = ck::bhalf_t;

int profile_gemm_add(int argc, char* argv[])
{
    enum struct MatrixLayout
    {
        MK_KN_MN_MN, // 0
        MK_NK_MN_MN, // 1
        KM_KN_MN_MN, // 2
        KM_NK_MN_MN, // 3
    };

    enum struct MatrixDataType
    {
        F16_INT8_F16_F16,    // 0
        BF16_INT8_BF16_BF16, // 1
    };

    if(argc != 15)
    {
        // clang-format off
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: f16&i8 1: bf16&i8)\n");
        printf("arg3: matrix layout (0: E[m, n] = ReLU(A[m, k] * B[k, n] + D0[m, n]);\n");
        printf("                     1: E[m, n] = ReLU(A[m, k] * B[n, k] + D0[m, n]);\n");
        printf("                     2: E[m, n] = ReLU(A[k, m] * B[k, n] + D0[m, n]);\n");
        printf("                     3: E[m, n] = ReLU(A[k, m] * B[n, k] + D0[m, n]))\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 14: M, N, K, StrideA, StrideB, StrideD0, StrideE\n");
        // clang-format on
        exit(1);
    }

    const auto data_type       = static_cast<MatrixDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<MatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int M                = std::stoi(argv[8]);
    const int N                = std::stoi(argv[9]);
    const int K                = std::stoi(argv[10]);
    const int StrideA          = std::stoi(argv[11]);
    const int StrideB          = std::stoi(argv[12]);
    const int StrideD0         = std::stoi(argv[13]);
    const int StrideE          = std::stoi(argv[14]);

    using F16 = ck::half_t;
    using F32 = float;

    using Row = ck::tensor_layout::gemm::RowMajor;
    // using Col = ck::tensor_layout::gemm::ColumnMajor;

    auto profile = [&](auto a_type,
                       auto b_type,
                       auto acc_type,
                       auto d0_type,
                       auto e_type,
                       auto a_layout,
                       auto b_layout,
                       auto d0_layout,
                       auto e_layout) {
        using ADataType   = decltype(a_type);
        using BDataType   = decltype(b_type);
        using AccDataType = decltype(acc_type);
        using D0DataType  = decltype(d0_type);
        using EDataType   = decltype(e_type);

        using ALayout  = decltype(a_layout);
        using BLayout  = decltype(b_layout);
        using D0Layout = decltype(d0_layout);
        using ELayout  = decltype(e_layout);

        const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB  = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
        const int DefaultStrideE  = ck::is_same_v<ELayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_add_impl<ADataType,
                                                        BDataType,
                                                        AccDataType,
                                                        D0DataType,
                                                        EDataType,
                                                        ALayout,
                                                        BLayout,
                                                        D0Layout,
                                                        ELayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? DefaultStrideA : StrideA,
            (StrideB < 0) ? DefaultStrideB : StrideB,
            (StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
            (StrideE < 0) ? DefaultStrideE : StrideE);

        return pass ? 0 : 1;
    };

    if(data_type == MatrixDataType::F16_INT8_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(F16{}, INT8{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::BF16_INT8_BF16_BF16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(BF16{}, INT8{}, F32{}, BF16{}, BF16{}, Row{}, Row{}, Row{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;
        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add);
```
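The three new gemm_add* profilers (here and profile_gemm_add_relu.cpp / profile_gemm_add_silu.cpp below) share the same 14-argument interface, so one invocation pattern covers all of them. A hedged sketch; the M, N, K values are illustrative, and `-1` strides fall back to the row-major defaults computed in the code:

```bash
# data type 0: A=f16, B=int8; layout 0: E[m,n] = A[m,k] * B[k,n] + D0[m,n]
../build/bin/ckProfiler gemm_add      0 0 1 2 0 1 1024 1024 1024 -1 -1 -1 -1
../build/bin/ckProfiler gemm_add_relu 0 0 1 2 0 1 1024 1024 1024 -1 -1 -1 -1
../build/bin/ckProfiler gemm_add_silu 0 0 1 2 0 1 1024 1024 1024 -1 -1 -1 -1
```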
profiler/src/profile_gemm_add_fastgelu.cpp

```diff
 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
 
 #include <iostream>
 #include <numeric>
@@ -12,6 +12,9 @@
 #define OP_NAME "gemm_add_fastgelu"
 #define OP_DESC "GEMM+Add+FastGeLU"
 
+using INT8 = int8_t;
+using BF16 = ck::bhalf_t;
+
 int profile_gemm_add_fastgelu(int argc, char* argv[])
 {
     enum struct MatrixLayout
@@ -28,13 +31,15 @@ int profile_gemm_add_fastgelu(int argc, char* argv[])
         F16_F16_F16_F16,     // 1
         BF16_BF16_BF16_BF16, // 2
         INT8_INT8_INT8_INT8, // 3
+        F16_INT8_F16_F16,    // 4
+        BF16_INT8_BF16_BF16, // 5
     };
 
     if(argc != 15)
     {
         // clang-format off
         printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
-        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
+        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8; 4: f16&i8 5: bf16&i8)\n");
         printf("arg3: matrix layout (0: E[m, n] = FastGeLU(A[m, k] * B[k, n] + D0[m, n]);\n");
         printf("                     1: E[m, n] = FastGeLU(A[m, k] * B[n, k] + D0[m, n]);\n");
         printf("                     2: E[m, n] = FastGeLU(A[k, m] * B[k, n] + D0[m, n]);\n");
@@ -135,6 +140,14 @@ int profile_gemm_add_fastgelu(int argc, char* argv[])
     {
         return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Col{}, Row{}, Row{});
     }
+    else if(data_type == MatrixDataType::F16_INT8_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
+    {
+        return profile(F16{}, INT8{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
+    }
+    else if(data_type == MatrixDataType::BF16_INT8_BF16_BF16 && layout == MatrixLayout::MK_KN_MN_MN)
+    {
+        return profile(BF16{}, INT8{}, F32{}, BF16{}, BF16{}, Row{}, Row{}, Row{}, Row{});
+    }
     else
     {
         std::cout << "this data_type & layout is not implemented" << std::endl;
```
profiler/src/profile_gemm_add_relu.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_gemm_add_relu_impl.hpp"
#include "profiler_operation_registry.hpp"

#define OP_NAME "gemm_add_relu"
#define OP_DESC "GEMM+Add+ReLU"

using INT8 = int8_t;
using BF16 = ck::bhalf_t;

int profile_gemm_add_relu(int argc, char* argv[])
{
    enum struct MatrixLayout
    {
        MK_KN_MN_MN, // 0
        MK_NK_MN_MN, // 1
        KM_KN_MN_MN, // 2
        KM_NK_MN_MN, // 3
    };

    enum struct MatrixDataType
    {
        F16_INT8_F16_F16,    // 0
        BF16_INT8_BF16_BF16, // 1
    };

    if(argc != 15)
    {
        // clang-format off
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: f16&i8 1: bf16&i8)\n");
        printf("arg3: matrix layout (0: E[m, n] = ReLU(A[m, k] * B[k, n] + D0[m, n]);\n");
        printf("                     1: E[m, n] = ReLU(A[m, k] * B[n, k] + D0[m, n]);\n");
        printf("                     2: E[m, n] = ReLU(A[k, m] * B[k, n] + D0[m, n]);\n");
        printf("                     3: E[m, n] = ReLU(A[k, m] * B[n, k] + D0[m, n]))\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 14: M, N, K, StrideA, StrideB, StrideD0, StrideE\n");
        // clang-format on
        exit(1);
    }

    const auto data_type       = static_cast<MatrixDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<MatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int M                = std::stoi(argv[8]);
    const int N                = std::stoi(argv[9]);
    const int K                = std::stoi(argv[10]);
    const int StrideA          = std::stoi(argv[11]);
    const int StrideB          = std::stoi(argv[12]);
    const int StrideD0         = std::stoi(argv[13]);
    const int StrideE          = std::stoi(argv[14]);

    using F16 = ck::half_t;
    using F32 = float;

    using Row = ck::tensor_layout::gemm::RowMajor;
    // using Col = ck::tensor_layout::gemm::ColumnMajor;

    auto profile = [&](auto a_type,
                       auto b_type,
                       auto acc_type,
                       auto d0_type,
                       auto e_type,
                       auto a_layout,
                       auto b_layout,
                       auto d0_layout,
                       auto e_layout) {
        using ADataType   = decltype(a_type);
        using BDataType   = decltype(b_type);
        using AccDataType = decltype(acc_type);
        using D0DataType  = decltype(d0_type);
        using EDataType   = decltype(e_type);

        using ALayout  = decltype(a_layout);
        using BLayout  = decltype(b_layout);
        using D0Layout = decltype(d0_layout);
        using ELayout  = decltype(e_layout);

        const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB  = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
        const int DefaultStrideE  = ck::is_same_v<ELayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_add_relu_impl<ADataType,
                                                             BDataType,
                                                             AccDataType,
                                                             D0DataType,
                                                             EDataType,
                                                             ALayout,
                                                             BLayout,
                                                             D0Layout,
                                                             ELayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? DefaultStrideA : StrideA,
            (StrideB < 0) ? DefaultStrideB : StrideB,
            (StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
            (StrideE < 0) ? DefaultStrideE : StrideE);

        return pass ? 0 : 1;
    };

    if(data_type == MatrixDataType::F16_INT8_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(F16{}, INT8{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::BF16_INT8_BF16_BF16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(BF16{}, INT8{}, F32{}, BF16{}, BF16{}, Row{}, Row{}, Row{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;
        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_relu);
```
profiler/src/profile_gemm_add_silu.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_gemm_add_silu_impl.hpp"
#include "profiler_operation_registry.hpp"

#define OP_NAME "gemm_add_silu"
#define OP_DESC "GEMM+Add+SiLU"

using INT8 = int8_t;
using BF16 = ck::bhalf_t;

int profile_gemm_add_silu(int argc, char* argv[])
{
    enum struct MatrixLayout
    {
        MK_KN_MN_MN, // 0
        MK_NK_MN_MN, // 1
        KM_KN_MN_MN, // 2
        KM_NK_MN_MN, // 3
    };

    enum struct MatrixDataType
    {
        F16_INT8_F16_F16,    // 0
        BF16_INT8_BF16_BF16, // 1
    };

    if(argc != 15)
    {
        // clang-format off
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: f16&i8 1: bf16&i8)\n");
        printf("arg3: matrix layout (0: E[m, n] = ReLU(A[m, k] * B[k, n] + D0[m, n]);\n");
        printf("                     1: E[m, n] = ReLU(A[m, k] * B[n, k] + D0[m, n]);\n");
        printf("                     2: E[m, n] = ReLU(A[k, m] * B[k, n] + D0[m, n]);\n");
        printf("                     3: E[m, n] = ReLU(A[k, m] * B[n, k] + D0[m, n]))\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 14: M, N, K, StrideA, StrideB, StrideD0, StrideE\n");
        // clang-format on
        exit(1);
    }

    const auto data_type       = static_cast<MatrixDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<MatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int M                = std::stoi(argv[8]);
    const int N                = std::stoi(argv[9]);
    const int K                = std::stoi(argv[10]);
    const int StrideA          = std::stoi(argv[11]);
    const int StrideB          = std::stoi(argv[12]);
    const int StrideD0         = std::stoi(argv[13]);
    const int StrideE          = std::stoi(argv[14]);

    using F16 = ck::half_t;
    using F32 = float;

    using Row = ck::tensor_layout::gemm::RowMajor;
    // using Col = ck::tensor_layout::gemm::ColumnMajor;

    auto profile = [&](auto a_type,
                       auto b_type,
                       auto acc_type,
                       auto d0_type,
                       auto e_type,
                       auto a_layout,
                       auto b_layout,
                       auto d0_layout,
                       auto e_layout) {
        using ADataType   = decltype(a_type);
        using BDataType   = decltype(b_type);
        using AccDataType = decltype(acc_type);
        using D0DataType  = decltype(d0_type);
        using EDataType   = decltype(e_type);

        using ALayout  = decltype(a_layout);
        using BLayout  = decltype(b_layout);
        using D0Layout = decltype(d0_layout);
        using ELayout  = decltype(e_layout);

        const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB  = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
        const int DefaultStrideE  = ck::is_same_v<ELayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_add_silu_impl<ADataType,
                                                             BDataType,
                                                             AccDataType,
                                                             D0DataType,
                                                             EDataType,
                                                             ALayout,
                                                             BLayout,
                                                             D0Layout,
                                                             ELayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA < 0) ? DefaultStrideA : StrideA,
            (StrideB < 0) ? DefaultStrideB : StrideB,
            (StrideD0 < 0) ? DefaultStrideD0 : StrideD0,
            (StrideE < 0) ? DefaultStrideE : StrideE);

        return pass ? 0 : 1;
    };

    if(data_type == MatrixDataType::F16_INT8_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(F16{}, INT8{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
    }
    else if(data_type == MatrixDataType::BF16_INT8_BF16_BF16 && layout == MatrixLayout::MK_KN_MN_MN)
    {
        return profile(BF16{}, INT8{}, F32{}, BF16{}, BF16{}, Row{}, Row{}, Row{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;
        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_add_silu);
```
profiler/src/profile_gemm_splitk.cpp

```diff
@@ -33,7 +33,7 @@ enum struct GemmDataType
 int profile_gemm_splitk(int argc, char* argv[])
 {
-    if(argc != 15)
+    if(argc != 15 && argc != 17)
     {
         printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
         printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8; 4: f8@f16; 5: f16@f8; 6: f16, "
@@ -48,6 +48,9 @@ int profile_gemm_splitk(int argc, char* argv[])
         printf("arg7: time kernel (0=no, 1=yes)\n");
         printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
         printf("arg14: split k into mulitiple batch\n");
+        printf("optional:\n");
+        printf("arg15: number of warm-up cycles (default 1)\n");
+        printf("arg16: number of iterations (default 10)\n");
         exit(1);
     }
@@ -67,6 +70,14 @@ int profile_gemm_splitk(int argc, char* argv[])
     const int StrideC = std::stoi(argv[13]);
     const int KBatch  = std::stoi(argv[14]);
 
+    int n_warmup = 1;
+    int n_iter   = 10;
+    if(argc == 17)
+    {
+        n_warmup = std::stoi(argv[15]);
+        n_iter   = std::stoi(argv[16]);
+    }
+
     using F32 = float;
     using F16 = ck::half_t;
 #if defined CK_ENABLE_FP8
@@ -117,7 +128,9 @@ int profile_gemm_splitk(int argc, char* argv[])
             (StrideA < 0) ? DefaultStrideA : StrideA,
             (StrideB < 0) ? DefaultStrideB : StrideB,
             (StrideC < 0) ? DefaultStrideC : StrideC,
-            KBatch);
+            KBatch,
+            n_warmup,
+            n_iter);
 
         return pass ? 0 : 1;
     };
```
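The split-K profiler follows the same optional-argument convention. A sketch with illustrative sizes, `KBatch=4`, and the new warm-up/iteration pair appended (the data-type and layout codes are placeholders to be matched against the helper message):

```bash
# M N K StrideA StrideB StrideC KBatch, then optional n_warmup n_iter (argc == 17)
../build/bin/ckProfiler gemm_splitk 1 0 1 2 0 1 3840 4096 4096 -1 -1 -1 4 5 20
```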
profiler/src/profile_grouped_gemm.cpp

```diff
@@ -69,7 +69,10 @@ int profile_grouped_gemm(int argc, char* argv[])
        << "arg7: time kernel (0=n0, 1=yes)\n"
        << "arg8 to 13: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 "
           "64,64 64,64 128,128)\n"
-       << "arg15: kbatch value (default 4)\n"
+       << "arg15: kbatch value (default 1)\n"
+       << "optional:\n"
+       << "arg16: number of warm-up cycles (default 1)\n"
+       << "arg17: number of iterations (default 10)\n"
        << std::endl;
     exit(1);
@@ -90,6 +93,15 @@ int profile_grouped_gemm(int argc, char* argv[])
     const auto StrideBs = argToIntArray(argv[12]);
     const auto StrideCs = argToIntArray(argv[13]);
     const int kbatch    = argc == 15 ? std::stoi(argv[14]) : 1;
 
+    int n_warmup = 1;
+    int n_iter   = 10;
+    if(argc == 17)
+    {
+        n_warmup = std::stoi(argv[16]);
+        n_iter   = std::stoi(argv[17]);
+    }
+
 #ifdef CK_ENABLE_FP16
     if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
@@ -109,7 +121,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
     {
@@ -129,7 +143,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
     {
@@ -149,7 +165,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
     {
@@ -169,7 +187,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else if(data_type == GemmDataType::F8_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
@@ -189,7 +209,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else if(data_type == GemmDataType::F16_F8_F16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
@@ -209,7 +231,9 @@ int profile_grouped_gemm(int argc, char* argv[])
             StrideAs,
             StrideBs,
             StrideCs,
-            kbatch);
+            kbatch,
+            n_warmup,
+            n_iter);
     }
     else
     {
```
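Grouped GEMM takes comma-separated per-group lists. Reusing the example values from its own helper message, with the new optional warm-up/iteration counts appended (a sketch, not output from this commit; note the code reads them from argv[16] and argv[17]):

```bash
# Ms Ns Ks StrideAs StrideBs StrideCs are per-group, comma-separated lists
../build/bin/ckProfiler grouped_gemm 1 0 1 2 0 1 256,256 128,128 64,64 64,64 64,64 128,128 1 5 20
```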
profiler/src/profile_groupnorm_bwd_data.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_groupnorm_bwd_data_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct groupnormBwdDataArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_groupnorm_bwd_data()
{
    // eg: ckProfiler groupnorm_bwd_data 1 0 2 0 1 --length 1 16 16 32 40
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1 16 16 32 40)\n"
              << std::endl;
}

int profile_groupnorm_bwd_data(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_groupnorm_bwd_data();
        return 0;
    }

    groupnormBwdDataArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F32 = float;

    if(length.size() == 5)
    {
        if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_groupnorm_bwd_data_impl<F32, F32, F32, F32, F32, F32>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("length should be 5");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("groupnorm_bwd_data", "Group Normalization", profile_groupnorm_bwd_data);
```
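The example invocation embedded in the source comment shows the long-option style these new normalization profilers use:

```bash
# from the comment in print_help_groupnorm_bwd_data(); data type 1 = fp32
../build/bin/ckProfiler groupnorm_bwd_data 1 0 2 0 1 --length 1 16 16 32 40
```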
profiler/src/profile_groupnorm_bwd_gamma_beta.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_groupnorm_bwd_gamma_beta_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct groupnormBwdGammaBetaArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_groupnorm_bwd_gamma_beta()
{
    // eg: ckProfiler groupnorm_bwd_gamma_beta 1 0 2 0 1 --length 1 16 16 32 40
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1 16 16 32 40)\n"
              << std::endl;
}

int profile_groupnorm_bwd_gamma_beta(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_groupnorm_bwd_gamma_beta();
        return 0;
    }

    groupnormBwdGammaBetaArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F32 = float;

    if(length.size() == 5)
    {
        if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_groupnorm_bwd_gamma_beta_impl<F32, F32, F32, F32, F32, F32>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("length should be 5");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("groupnorm_bwd_gamma_beta",
                            "Group Normalization",
                            profile_groupnorm_bwd_gamma_beta);
```
profiler/src/profile_groupnorm_fwd.cpp

```diff
@@ -98,7 +98,7 @@ int profile_groupnorm(int argc, char* argv[])
     }
     else if(data_type == ck::DataTypeEnum::Half)
     {
-        ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16, F32, false>(
+        ck::profiler::profile_groupnorm_impl<F16, F16, F16, F32, F16, F16, false>(
             do_verification, init_method, do_log, time_kernel, length);
     }
     else
```
profiler/src/profile_layernorm_bwd_data.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_layernorm_bwd_data_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct layernormBwdDataArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_layernorm_bwd_data()
{
    // eg: ckProfiler layernorm_bwd_data 0 0 2 0 1 --length 1502 4096
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1024 1024)\n"
              << std::endl;
}

int profile_layernorm_bwd_data(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_layernorm_bwd_data();
        return 0;
    }

    layernormBwdDataArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F16 = ck::half_t;
    using F32 = float;

    if(length.size() == 2)
    {
        constexpr int rank = 2;
        if(data_type == ck::DataTypeEnum::Half)
        {
            ck::profiler::profile_layernorm_bwd_data_impl<F16, F16, F16, F16, F32, F16, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_layernorm_bwd_data_impl<F32, F32, F32, F32, F32, F32, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("not implemented yet");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("layernorm_bwd_data", "Layer Normalization", profile_layernorm_bwd_data);
```
profiler/src/profile_layernorm_bwd_gamma_beta.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <unordered_map>

#include "profiler/data_type_enum.hpp"
#include "profiler/profile_layernorm_bwd_gamma_beta_impl.hpp"
#include "profiler_operation_registry.hpp"

using ck::index_t;

struct layernormBwdGammaBetaArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"length", {}}};

    bool parse_opt(int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

void print_help_layernorm_bwd_gamma_beta()
{
    // eg: ckProfiler layernorm_bwd_gamma_beta 0 0 2 0 1 --length 1502 4096
    std::cout << "arg1: data type (0: fp16; 1: fp32)\n"
              << "arg2: verification (0: no; 1: yes)\n"
              << "arg3: initialization (0: no init; 1: integer value; 2: decimal value)\n"
              << "arg4: print tensor value (0: no; 1: yes)\n"
              << "arg5: time kernel (0=no, 1=yes)\n"
              << "--length: tensor extents (e.g, --length 1024 1024)\n"
              << std::endl;
}

int profile_layernorm_bwd_gamma_beta(int argc, char* argv[])
{
    if(argc <= 2)
    {
        print_help_layernorm_bwd_gamma_beta();
        return 0;
    }

    layernormBwdGammaBetaArgParser arg_parser;

    // short unnamed options
    const ck::DataTypeEnum data_type = static_cast<ck::DataTypeEnum>(std::stoi(argv[2]));
    const bool do_verification       = std::stoi(argv[3]);
    const int init_method            = std::stoi(argv[4]);
    const bool do_log                = std::stoi(argv[5]);
    const bool time_kernel           = std::stoi(argv[6]);

    // parse the long options
    arg_parser(argc, argv);
    const std::vector<index_t> length = arg_parser.long_opts["length"];

    using F16 = ck::half_t;
    using F32 = float;

    if(length.size() == 2)
    {
        constexpr int rank = 2;
        if(data_type == ck::DataTypeEnum::Half)
        {
            ck::profiler::profile_layernorm_bwd_gamma_beta_impl<F16, F16, F16, F32, F16, F16, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else if(data_type == ck::DataTypeEnum::Float)
        {
            ck::profiler::profile_layernorm_bwd_gamma_beta_impl<F32, F32, F32, F32, F32, F32, rank>(
                do_verification, init_method, do_log, time_kernel, length);
        }
        else
        {
            throw std::runtime_error("not implemented yet");
        }
    }
    else
    {
        throw std::runtime_error("not implemented yet");
    }

    return 0;
}

REGISTER_PROFILER_OPERATION("layernorm_bwd_gamma_beta",
                            "Layer Normalization",
                            profile_layernorm_bwd_gamma_beta);
```
profiler/src/profile_layernorm_fwd.cpp

```diff
@@ -104,7 +104,7 @@ int profile_layernorm(int argc, char* argv[])
     if(data_type == ck::DataTypeEnum::Half)
     {
-        ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, F32, false, rank>(
+        ck::profiler::profile_layernorm_impl<F16, F16, F16, F32, F16, F16, false, rank>(
             do_verification, init_method, do_log, time_kernel, length);
     }
     else if(data_type == ck::DataTypeEnum::Float)
@@ -125,4 +125,4 @@ int profile_layernorm(int argc, char* argv[])
     return 0;
 }
 
-REGISTER_PROFILER_OPERATION("layernorm", "Layer Normalization", profile_layernorm);
+REGISTER_PROFILER_OPERATION("layernorm_fwd", "Layer Normalization", profile_layernorm);
```
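Since the registration string changed, existing scripts that invoke `ckProfiler layernorm ...` need to switch to the new op name. A sketch, assuming the forward profiler keeps the same argument shape as the backward variants above:

```bash
# op name is now "layernorm_fwd" instead of "layernorm"
../build/bin/ckProfiler layernorm_fwd 0 0 2 0 1 --length 1502 4096
```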
profiler/src/profile_transpose.cpp (new file, mode 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_transpose_impl.hpp"
#include "profiler_operation_registry.hpp"

enum struct DataType
{
    F32_F32_F32_F32_F32, // 0
    F16_F16_F16_F16_F16, // 1
};

#define OP_NAME "transpose"
#define OP_DESC "Transpose"

struct TransposeArgParser
{
    std::unordered_map<std::string, std::vector<int>> long_opts = {{"lengths", {}}};

    bool parse_opt(const int argc, char* argv[], const std::string& key, int i)
    {
        if(std::string("--") + key == argv[i])
        {
            const int pos = i;
            while(++i < argc && argv[i][0] != '-') {}
            int end = i;
            for(int j = pos + 1; j < end; j++)
            {
                long_opts[key].push_back(std::stoi(argv[j]));
            }
            return true;
        }
        return false;
    }

    void operator()(int argc, char* argv[])
    {
        for(auto& kv : long_opts)
        {
            for(int i = 1; i < argc; i++)
            {
                if(parse_opt(argc, argv, kv.first, i))
                    break;
            }
        }
    }
};

static void print_helper_msg()
{
    printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
    printf("arg2: data type (0: fp32; 1: fp16)\n");
    printf("arg3: verification (0: no; 1: yes)\n");
    printf("arg4: initialization (0: no init; 1: integer value; 2: decimal value)\n");
    printf("arg5: print tensor value (0: no; 1: yes)\n");
    printf("arg6: time kernel (0=no, 1=yes)\n");
    printf("arg7: --lengths: N, C, D, H, W\n");
}

int profile_transpose(int argc, char* argv[])
{
    if(argc != 7)
    {
        print_helper_msg();
        exit(1);
    }

    TransposeArgParser arg_parser;

    const auto data_type       = static_cast<DataType>(std::stoi(argv[2]));
    const bool do_verification = std::stoi(argv[3]);
    const int init_method      = std::stoi(argv[4]);
    const bool do_log          = std::stoi(argv[5]);
    const bool time_kernel     = std::stoi(argv[6]);
    arg_parser(argc, argv);
    const std::vector<ck::index_t> lengths = arg_parser.long_opts["lengths"];

    using F32 = float;
    using F16 = ck::half_t;

    auto profile = [&](auto a_type, auto b_type) {
        using ADataType = decltype(a_type);
        using BDataType = decltype(b_type);

        constexpr ck::index_t NumDim = 5;

        bool pass = ck::profiler::profile_transpose_impl<ADataType, BDataType, NumDim>(
            do_verification, init_method, do_log, time_kernel, lengths);

        return pass ? 0 : 1;
    };

    if(data_type == DataType::F32_F32_F32_F32_F32)
    {
        return profile(F32{}, F32{});
    }
    else if(data_type == DataType::F16_F16_F16_F16_F16)
    {
        return profile(F16{}, F16{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;
        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_transpose);
```
script/clang-format-overwrite.sh

```diff
 # find . -name deps -prune -o -name build -prune -o -iname '*.h' -o -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.h.in' -o -iname '*.hpp.in' -o -iname '*.cpp.in' -o -iname '*.cl' -o -iname '*.cuh' -o -iname '*.cu' -o -iname '*.inc' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
-find . -name deps -prune -o -name build -prune -o -iname '*.h' -o -iname '*.hpp' -o -iname '*.cpp' -o -iname '*.h.in' -o -iname '*.hpp.in' -o -iname '*.cpp.in' -o -iname '*.cl' -o -iname '*.cuh' -o -iname '*.cu' -o -iname '*.inc' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
+git status --porcelain | awk '$1 != "D" && (match($2, "\\.cpp|hpp|inc")) {print $2}' | xargs -n 1 -P 16 -I{} -t sh -c 'clang-format-12 -i -style=file {}'
```
script/parse_perf_data.py (deleted, mode 100644 → 0)

```python
#!/usr/bin/env python3
import os, io, argparse, datetime, re
import numpy as np
import sqlalchemy
from sqlalchemy.types import NVARCHAR, Float, Integer
import pymysql
import pandas as pd
from sshtunnel import SSHTunnelForwarder

def print_to_string(*args, **kwargs):
    output = io.StringIO()
    print(*args, file=output, **kwargs)
    contents = output.getvalue()
    output.close()
    return contents

def parse_args():
    parser = argparse.ArgumentParser(description='Parse results from tf benchmark runs')
    parser.add_argument('filename', type=str,
                        help='Log file to prase or directory containing log files')
    args = parser.parse_args()
    files = []
    if os.path.isdir(args.filename):
        all_files = os.listdir(args.filename)
        for name in all_files:
            if not 'log' in name:
                continue
            files.append(os.path.join(args.filename, name))
    else:
        files = [args.filename]
    args.files = files
    return args

def main():
    args = parse_args()
    tests = []
    kernels = []
    tflops = []
    dtype = []
    alayout = []
    blayout = []
    M = []
    N = []
    K = []
    StrideA = []
    StrideB = []
    StrideC = []
    #parse results, get the Tflops value for "Best Perf" kernels
    glue = ""
    for filename in args.files:
        for line in open(filename):
            if 'Branch name' in line:
                lst = line.split()
                branch_name = lst[2]
            if 'On branch' in line:
                lst = line.split()
                branch_name = lst[2]
            if 'Node name' in line:
                lst = line.split()
                node_id = lst[2]
            if 'GPU_arch' in line:
                lst = line.split()
                gpu_arch = lst[2]
            if 'HIP version' in line:
                lst = line.split()
                hip_vers = lst[2]
            if 'Compute Unit' in line:
                lst = line.split()
                compute_units = lst[2]
            if 'InstalledDir' in line:
                lst = line.split()
                rocm_vers = lst[1][lst[1].find('/opt/rocm-') +
                                   len('/opt/rocm-'):lst[1].rfind('/llvm/bin')]
    print("Branch name:", branch_name)
    print("Node name:", node_id)
    print("GPU_arch:", gpu_arch)
    print("Compute units:", compute_units)
    print("ROCM_version:", rocm_vers)
    print("HIP_version:", hip_vers)
    #parse gemm performance tests:
    if 'gemm' in filename:
        for filename in args.files:
            for line in open(filename):
                if 'Best Perf' in line:
                    lst = line.split()
                    if len(lst) >= 37: #the line is complete
                        tests.append(glue.join(lst[5:30]))
                        kernels.append(glue.join(lst[37:]))
                        tflops.append(lst[33])
                        dtype.append(lst[5])
                        alayout.append(lst[8])
                        blayout.append(lst[11])
                        M.append(lst[14])
                        N.append(lst[17])
                        K.append(lst[20])
                        StrideA.append(lst[23])
                        StrideB.append(lst[26])
                        StrideC.append(lst[29])
                    elif len(lst) < 37 and len(lst) >= 33: #the tflops are available
                        tests.append(glue.join(lst[5:30]))
                        kernels.append("N/A")
                        tflops.append(lst[33])
                        dtype.append(lst[5])
                        alayout.append(lst[8])
                        blayout.append(lst[11])
                        M.append(lst[14])
                        N.append(lst[17])
                        K.append(lst[20])
                        StrideA.append(lst[23])
                        StrideB.append(lst[26])
                        StrideC.append(lst[29])
                        print("warning: incomplete line:", lst)
                    elif len(lst) < 33: #even the tflops are not available
                        print("Error in ckProfiler output!")
                        print("warning: incomplete line=", lst)
        #sort results
        #sorted_tests = sorted(tests)
        #print("sorted tests:",sorted_tests)
        sorted_tflops = [x for _, x in sorted(zip(tests, tflops))]
        #sorted_kernels = [x for _,x in sorted(zip(tests,kernels))]
        test_list = list(range(1, len(tests) + 1))
    #parse resnet50 performance tests:
    if 'resnet50' in filename:
        for filename in args.files:
            for line in open(filename):
                if 'Best Perf' in line:
                    lst = line.split()
                    tflops.append(lst[4])
    print("Number of tests:", len(tflops))

    sql_hostname = '127.0.0.1'
    sql_username = os.environ["dbuser"]
    sql_password = os.environ["dbpassword"]
    sql_main_database = 'miopen_perf'
    sql_port = 3306
    ssh_host = os.environ["dbsship"]
    ssh_user = os.environ["dbsshuser"]
    ssh_port = int(os.environ["dbsshport"])
    ssh_pass = os.environ["dbsshpassword"]

    with SSHTunnelForwarder((ssh_host, ssh_port),
                            ssh_username=ssh_user,
                            ssh_password=ssh_pass,
                            remote_bind_address=(sql_hostname, sql_port)) as tunnel:
        sqlEngine = sqlalchemy.create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}'.format(
            sql_username, sql_password, sql_hostname, tunnel.local_bind_port,
            sql_main_database))
        conn = sqlEngine.connect()
        #save gemm performance tests:
        if 'gemm' in filename:
            #write the ck_gemm_test_params table
            #only needed once the test set changes
            '''
            sorted_dtypes = [x for _,x in sorted(zip(tests,dtype))]
            sorted_alayout = [x for _,x in sorted(zip(tests,alayout))]
            sorted_blayout = [x for _,x in sorted(zip(tests,blayout))]
            sorted_M = [x for _,x in sorted(zip(tests,M))]
            sorted_N = [x for _,x in sorted(zip(tests,N))]
            sorted_K = [x for _,x in sorted(zip(tests,K))]
            sorted_StrideA = [x for _,x in sorted(zip(tests,StrideA))]
            sorted_StrideB = [x for _,x in sorted(zip(tests,StrideB))]
            sorted_StrideC = [x for _,x in sorted(zip(tests,StrideC))]
            ck_gemm_params=[test_list,sorted_dtypes,sorted_alayout,sorted_blayout,
                sorted_M,sorted_N,sorted_K,sorted_StrideA,sorted_StrideB,
                sorted_StrideC]
            df=pd.DataFrame(np.transpose(ck_gemm_params),columns=['Test_number','Data_type',
                'Alayout','BLayout','M','N','K', 'StrideA','StrideB','StrideC'])
            print(df)
            dtypes = {
                'Test_number': Integer(),
                'Data_type': NVARCHAR(length=5),
                'Alayout': NVARCHAR(length=12),
                'Blayout': NVARCHAR(length=12),
                'M': Integer(),
                'N': Integer(),
                'K': Integer(),
                'StrideA': Integer(),
                'StrideB': Integer(),
                'StrideC': Integer()
            }
            df.to_sql("ck_gemm_test_params",conn,if_exists='replace',index=False, dtype=dtypes)
            '''
            #read baseline results for the latest develop branch
            query = '''SELECT * from ck_gemm_tflops WHERE Datetime = (SELECT MAX(Datetime) FROM ck_gemm_tflops where Branch_ID='develop' );'''
            tflops_base = pd.read_sql_query(query, conn)
            #write new results to the db
            testlist = []
            for i in range(1, len(tests) + 1):
                testlist.append("Test%i" % i)
            ck_gemm_tflops = [str(branch_name), str(node_id), str(gpu_arch), compute_units,
                              str(rocm_vers), str(hip_vers), str(datetime.datetime.now())]
            flops = pd.DataFrame(data=[ck_gemm_tflops],
                                 columns=['Branch_ID', 'Node_ID', 'GPU_arch', 'Compute Units',
                                          'ROCM_version', 'HIP_version', 'Datetime'])
            df_add = pd.DataFrame(data=[sorted_tflops], columns=testlist)
            flops = pd.concat([flops, df_add], axis=1)
            print("new tflops for gemm tests:", flops)
            flops.to_sql("ck_gemm_tflops", conn, if_exists='append', index=False)
        #save resnet50 performance tests:
        if 'resnet50' in filename:
            #read baseline results for the latest develop branch
            query = '''SELECT * from ck_resnet50_N256_tflops WHERE Datetime = (SELECT MAX(Datetime) FROM ck_resnet50_N256_tflops where Branch_ID='develop' );'''
            tflops_base_N256 = pd.read_sql_query(query, conn)
            query = '''SELECT * from ck_resnet50_N4_tflops WHERE Datetime = (SELECT MAX(Datetime) FROM ck_resnet50_N4_tflops where Branch_ID='develop' );'''
            tflops_base_N4 = pd.read_sql_query(query, conn)
            #write new results to the db
            testlist = []
            for i in range(1, 50):
                testlist.append("Layer%i" % i)
            ck_resnet_tflops = [str(branch_name), str(node_id), str(gpu_arch), compute_units,
                                str(rocm_vers), str(hip_vers), str(datetime.datetime.now())]
            flops0 = pd.DataFrame(data=[ck_resnet_tflops],
                                  columns=['Branch_ID', 'Node_ID', 'GPU_arch', 'Compute Units',
                                           'ROCM_version', 'HIP_version', 'Datetime'])
            df_add = pd.DataFrame(data=[tflops[0:49]], columns=testlist)
            flops = pd.concat([flops0, df_add], axis=1)
            print("new tflops for N=256 resnet50 test:", flops)
            flops.to_sql("ck_resnet50_N256_tflops", conn, if_exists='append', index=False)
            df_add = pd.DataFrame(data=[tflops[49:98]], columns=testlist)
            flops = pd.concat([flops0, df_add], axis=1)
            print("new tflops for N=4 resnet50 test:", flops)
            flops.to_sql("ck_resnet50_N4_tflops", conn, if_exists='append', index=False)
        conn.close()

    #compare the results to the baseline if baseline exists
    regression = 0
    if 'gemm' in filename:
        if not tflops_base.empty:
            base = tflops_base[testlist].to_numpy(dtype='float')
            base_list = base[0]
            ave_perf = 0
            for i in range(len(base_list)):
                # success criterion:
                if base_list[i] > 1.01 * float(sorted_tflops[i]):
                    print("test # ", i, "shows regression by {:.3f}%".format(
                        (float(sorted_tflops[i]) - base_list[i]) / base_list[i] * 100))
                    regression = 1
                ave_perf = ave_perf + float(sorted_tflops[i]) / base_list[i]
            if regression == 0:
                print("no regressions found")
            ave_perf = ave_perf / len(base_list)
            print("average performance relative to baseline:", ave_perf)
        else:
            print("could not find a baseline")
    if 'resnet50' in filename:
        if not tflops_base_N256.empty:
            base = tflops_base_N256[testlist].to_numpy(dtype='float')
            base_list = base[0]
            ave_perf = 0
            for i in range(len(base_list)):
                # success criterion:
                if base_list[i] > 1.01 * float(tflops[i]):
                    print("layer # ", i, "shows regression by {:.3f}%".format(
                        (float(tflops[i]) - base_list[i]) / base_list[i] * 100))
                    regression = 1
                ave_perf = ave_perf + float(tflops[i]) / base_list[i]
            if regression == 0:
                print("no regressions found")
            ave_perf = ave_perf / len(base_list)
            print("average performance relative to baseline:", ave_perf)
        else:
            print("could not find a baseline for N=256")
        if not tflops_base_N4.empty:
            base = tflops_base_N4[testlist].to_numpy(dtype='float')
            base_list = base[0]
            ave_perf = 0
            for i in range(len(base_list)):
                # success criterion:
                if base_list[i] > 1.01 * float(tflops[i + 49]):
                    print("layer # ", i, "shows regression by {:.3f}%".format(
                        (float(tflops[i + 49]) - base_list[i]) / base_list[i] * 100))
                    regression = 1
                ave_perf = ave_perf + float(tflops[i + 49]) / base_list[i]
            if regression == 0:
                print("no regressions found")
            ave_perf = ave_perf / len(base_list)
            print("average performance relative to baseline:", ave_perf)
        else:
            print("could not find a baseline for N=4")
    #return 0 if performance criteria met, otherwise return 1
    return regression

if __name__ == '__main__':
    main()
```
script/process_perf_data.py

```diff
@@ -133,7 +133,7 @@ def parse_logfile(logfile):
             if 'Best Perf' in line:
                 lst = line.split()
                 res.append(lst[4])
-    elif 'onnx_gemm' in logfile or 'splitK_gemm' in logfile:
+    elif 'onnx_gemm' in logfile or 'splitK_gemm' in logfile or 'mixed_gemm' in logfile:
         for line in open(logfile):
             if 'Best Perf' in line:
                 lst = line.split()
@@ -295,6 +295,10 @@ def main():
         for i in range(1, len(results) + 1):
             testlist.append("Test%i" % i)
         table_name = "ck_splitK_gemm_tflops"
+    if 'mixed_gemm' in filename:
+        for i in range(1, len(results) + 1):
+            testlist.append("Test%i" % i)
+        table_name = "ck_mixed_gemm_tflops"
     tflops_base = get_baseline(table_name, conn)
     store_new_test_result(table_name, results, testlist, branch_name, node_id, gpu_arch,
                           compute_units, rocm_vers, hip_vers, environment, conn)
```
script/profile_mixed_gemm.sh (new file, mode 100755, no newline at end of file)

```bash
#!/bin/bash
## GPU visibility
export HIP_VISIBLE_DEVICES=0
DRIVER="../build/bin/ckProfiler"
echo $DRIVER
OP=$1
DATATYPE=$2
LAYOUT=$3
VERIFY=$4
INIT=$5
LOG=$6
TIME=$7
KBatch=$8

######## op  datatype  layout verify init log  time M___ N___ K___  StrideA StrideB StrideC KBatch_
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16   16  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16   16  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16   16 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 2048  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 2048  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 2048 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 8192  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 8192  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME   16 8192 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048   16  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048   16  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048   16 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 2048  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 2048  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 2048 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 8192  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 8192  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 2048 8192 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192   16  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192   16  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192   16 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 2048  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 2048  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 2048 65536 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 8192  1024 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 8192  8192 -1 -1 -1 $KBatch
$DRIVER $OP $DATATYPE $LAYOUT $VERIFY $INIT $LOG $TIME 8192 8192 65536 -1 -1 -1 $KBatch
```
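A possible way to drive this sweep (the data-type, layout, and KBatch codes below are placeholder values; they must match whatever the chosen op's helper message defines):

```bash
# run from the script/ directory so that ../build/bin/ckProfiler resolves
cd script
./profile_mixed_gemm.sh gemm_splitk 4 0 1 2 0 1 4
```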