gaoqiong / composable_kernel · Commits

Commit 50540084, authored Aug 07, 2023 by Adam Osewski

    Merge branch 'develop' into aosewski/gemm_tile_loop

Parents: 9c4d265f, 2474dddb

Changes: 176 · Showing 16 changed files with 1204 additions and 79 deletions (+1204, -79)
profiler/src/profile_gemm.cpp                                                 +18   -4
profiler/src/profile_gemm_streamk.cpp                                        +155   -0
profiler/src/profile_grouped_conv_bwd_data.cpp                                +56  -27
profiler/src/profile_grouped_conv_bwd_weight.cpp                              +36  -11
script/profile_batched_gemm.sh                                                 +0   -7
test/batched_gemm_multi_d/CMakeLists.txt                                       +3   -3
test/batched_gemm_multi_d/test_batched_gemm_multi_d.cpp                        +4   -2
test/block_swizzle_test/block_swizzle_test.cpp                               +406   -0
test/block_swizzle_test/rebuild.sh                                             +3   -0
test/block_swizzle_test/simple_args.h                                        +159   -0
test/gemm/CMakeLists.txt                                                      +14  -10
test/grouped_convnd_bwd_data/CMakeLists.txt                                    +1   -1
test/grouped_convnd_bwd_data/test_grouped_convnd_bwd_data.cpp                 +38  -13
test/grouped_convnd_bwd_weight/CMakeLists.txt                                  +3   -1
test/grouped_convnd_bwd_weight/test_grouped_convnd_bwd_weight.cpp            +128   -0
test/grouped_convnd_bwd_weight/test_grouped_convnd_bwd_weight_interface.cpp  +180   -0
profiler/src/profile_gemm.cpp

```diff
@@ -67,11 +67,15 @@ int profile_gemm(int argc, char* argv[])
     const int StrideB = std::stoi(argv[12]);
     const int StrideC = std::stoi(argv[13]);

     using F32  = float;
     using F16  = ck::half_t;
-    using BF16 = ck::bhalf_t;
+#ifdef __bf16__
+    using BF16 = ck::bhalf_t;
+#endif
+#ifdef __int8__
     using INT8  = int8_t;
     using INT32 = int32_t;
+#endif

     using Row = ck::tensor_layout::gemm::RowMajor;
     using Col = ck::tensor_layout::gemm::ColumnMajor;
@@ -117,7 +121,10 @@ int profile_gemm(int argc, char* argv[])
         return pass ? 0 : 1;
     };

-    if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
+    if(false)
+        ;
+#ifdef __fp32__
+    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
     {
         return profile(Row{}, Row{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
@@ -133,6 +140,8 @@ int profile_gemm(int argc, char* argv[])
     {
         return profile(Col{}, Col{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
+#endif
+#ifdef __fp16__
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
         return profile(Row{}, Row{}, Row{}, F16{}, F16{}, F32{}, F16{});
@@ -149,6 +158,8 @@ int profile_gemm(int argc, char* argv[])
     {
         return profile(Col{}, Col{}, Row{}, F16{}, F16{}, F32{}, F16{});
     }
+#endif
+#ifdef __bf16__
     else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
         return profile(Row{}, Row{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
@@ -165,6 +176,8 @@ int profile_gemm(int argc, char* argv[])
     {
         return profile(Col{}, Col{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
     }
+#endif
+#ifdef __int8__
     else if(data_type == GemmDataType::INT8_INT8_INT8 && layout == GemmMatrixLayout::MK_KN_MN)
     {
         return profile(Row{}, Row{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
@@ -181,6 +194,7 @@ int profile_gemm(int argc, char* argv[])
     {
         return profile(Col{}, Col{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
     }
+#endif
     else
     {
         std::cout << "this data_type & layout is not implemented" << std::endl;
```
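The core trick in the hunks above is the dead `if(false);` head: it turns every data-type branch into an `else if` that can sit inside its own preprocessor guard, so any subset of dtype paths can be compiled out without breaking the chain. A minimal standalone sketch of the same pattern follows; the `HAVE_*` flags and the `selector` variable are illustrative stand-ins, not the macros the commit uses:

```cpp
#include <cstdio>

// Illustrative compile-time feature flags (assumed, not the commit's macros).
#define HAVE_FP32 1
#define HAVE_FP16 1

int main(int argc, char**)
{
    int selector = argc; // stand-in for the (data_type, layout) dispatch

    // Dead head: every real branch below is an `else if`, so each one can be
    // wrapped in its own #if and removed independently without dangling else.
    if(false)
        ;
#if HAVE_FP32
    else if(selector == 1)
    {
        printf("fp32 path\n");
    }
#endif
#if HAVE_FP16
    else if(selector == 2)
    {
        printf("fp16 path\n");
    }
#endif
    else
    {
        printf("this data_type & layout is not implemented\n");
    }
}
```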
profiler/src/profile_gemm_streamk.cpp (new file, 0 → 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/profile_gemm_streamk_impl.hpp"
#include "profiler_operation_registry.hpp"

enum struct GemmMatrixLayout
{
    MK_KN_MN, // 0
    MK_NK_MN, // 1
    KM_KN_MN, // 2
    KM_NK_MN, // 3
};

enum struct GemmDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};

#define OP_NAME "gemm_streamk"
#define OP_DESC "StreamK GEMM"

int profile_gemm_streamk(int argc, char* argv[])
{
    if(argc < 14)
    {
        printf("arg1: tensor operation (" OP_NAME ": " OP_DESC ")\n");
        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
        printf("arg4: verification (0: no; 1: yes)\n");
        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg6: print tensor value (0: no; 1: yes)\n");
        printf("arg7: time kernel (0=no, 1=yes)\n");
        printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
        printf("arg14: num_sk_blocks (optional)\n");
        exit(1);
    }

    const auto data_type       = static_cast<GemmDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<GemmMatrixLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int M                = std::stoi(argv[8]);
    const int N                = std::stoi(argv[9]);
    const int K                = std::stoi(argv[10]);
    const int StrideA          = std::stoi(argv[11]);
    const int StrideB          = std::stoi(argv[12]);
    const int StrideC          = std::stoi(argv[13]);
    const uint32_t NumSKBlocks =
        argc >= 15 ? static_cast<uint32_t>(std::stoul(std::string(argv[14]))) : 0xffffffff;

    using F32 = float;
    using F16 = ck::half_t;

    using Row = ck::tensor_layout::gemm::RowMajor;
    using Col = ck::tensor_layout::gemm::ColumnMajor;

    auto profile = [&](auto a_type,
                       auto b_type,
                       auto acc_type,
                       auto c_type,
                       auto a_layout,
                       auto b_layout,
                       auto c_layout) {
        using ADataType   = decltype(a_type);
        using BDataType   = decltype(b_type);
        using AccDataType = decltype(acc_type);
        using CDataType   = decltype(c_type);

        using ALayout = decltype(a_layout);
        using BLayout = decltype(b_layout);
        using CLayout = decltype(c_layout);

        const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
        const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
        const int DefaultStrideC = ck::is_same_v<CLayout, Row> ? N : M;

        bool pass = ck::profiler::profile_gemm_streamk_impl<ADataType,
                                                            BDataType,
                                                            AccDataType,
                                                            CDataType,
                                                            ALayout,
                                                            BLayout,
                                                            CLayout>(
            do_verification,
            init_method,
            do_log,
            time_kernel,
            M,
            N,
            K,
            (StrideA <= 0) ? DefaultStrideA : StrideA,
            (StrideB <= 0) ? DefaultStrideB : StrideB,
            (StrideC <= 0) ? DefaultStrideC : StrideC,
            NumSKBlocks);

        return pass ? 0 : 1;
    };

    if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F32{}, F32{}, F32{}, F32{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F32{}, F32{}, F32{}, F32{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        return profile(F32{}, F32{}, F32{}, F32{}, Col{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        return profile(F32{}, F32{}, F32{}, F32{}, Col{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, Row{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, Row{}, Col{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, Col{}, Row{}, Row{});
    }
    else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
    {
        return profile(F16{}, F16{}, F32{}, F16{}, Col{}, Col{}, Row{});
    }
    else
    {
        std::cout << "this data_type & layout is not implemented" << std::endl;
        return 1;
    }
}

REGISTER_PROFILER_OPERATION(OP_NAME, OP_DESC, profile_gemm_streamk);
```
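The `DefaultStride*` values in the `profile` lambda encode the usual leading-dimension convention: for a row-major matrix the stride is its column count, for a column-major one its row count. A minimal sketch of just that rule, with an assumed shape chosen only for illustration:

```cpp
#include <cstdio>

int main()
{
    // Assumed problem shape, for illustration only.
    const int M = 3840, N = 4096, K = 4096;
    const bool a_row_major = true, b_row_major = false, c_row_major = true;

    const int DefaultStrideA = a_row_major ? K : M; // A is MxK: ld = K if row-major, else M
    const int DefaultStrideB = b_row_major ? N : K; // B is KxN: ld = N if row-major, else K
    const int DefaultStrideC = c_row_major ? N : M; // C is MxN: ld = N if row-major, else M

    // With strides <= 0 on the command line, the profiler falls back to these.
    printf("lda=%d ldb=%d ldc=%d\n", DefaultStrideA, DefaultStrideB, DefaultStrideC);
}
```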
profiler/src/profile_grouped_conv_bwd_data.cpp

```diff
@@ -77,15 +77,10 @@ int profile_grouped_conv_bwd_data(int argc, char* argv[])
     using F16  = ck::half_t;
     using BF16 = ck::bhalf_t;

-    using GNHWC = ck::tensor_layout::convolution::GNHWC;
-    using NHWGC = ck::tensor_layout::convolution::NHWGC;
-    using GKYXC = ck::tensor_layout::convolution::GKYXC;
-    using GNHWK = ck::tensor_layout::convolution::GNHWK;
-    using NHWGK = ck::tensor_layout::convolution::NHWGK;
+    using namespace ck::tensor_layout::convolution;

     constexpr auto I2 = ck::Number<2>{};
+    constexpr auto I3 = ck::Number<3>{};

     auto profile = [&](auto num_dim_spatial_tmp,
                        auto out_layout,
```

The second hunk (@@ -116,36 +111,70 @@) flattens the nested `num_dim_spatial` / `layout` / `data_type` conditionals into combined branches and adds the 3-d dispatch; the resulting code:

```cpp
        return pass ? 0 : 1;
    };

    // GNHWC_GKYXC_GNHWK
    if(num_dim_spatial == 2 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, GNHWK{}, GKYXC{}, GNHWC{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, GNHWK{}, GKYXC{}, GNHWC{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, GNHWK{}, GKYXC{}, GNHWC{}, BF16{}, BF16{}, BF16{});
        }
    }
    // NHWGC_GKYXC_NHWGK
    else if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, NHWGK{}, GKYXC{}, NHWGC{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, NHWGK{}, GKYXC{}, NHWGC{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, NHWGK{}, GKYXC{}, NHWGC{}, BF16{}, BF16{}, BF16{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, GNDHWK{}, GKZYXC{}, GNDHWC{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, GNDHWK{}, GKZYXC{}, GNDHWC{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I3, GNDHWK{}, GKZYXC{}, GNDHWC{}, BF16{}, BF16{}, BF16{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, NDHWGK{}, GKZYXC{}, NDHWGC{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, NDHWGK{}, GKZYXC{}, NDHWGC{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I3, NDHWGK{}, GKZYXC{}, NDHWGC{}, BF16{}, BF16{}, BF16{});
        }
    }
```
profiler/src/profile_grouped_conv_bwd_weight.cpp

```diff
@@ -15,6 +15,7 @@ enum struct ConvLayout
 {
     GNCHW_GKCYX_GNKHW, // 0
     GNHWC_GKYXC_GNHWK, // 1
+    NHWGC_GKYXC_NHWGK, // 2
 };

 enum struct ConvDataType
@@ -37,6 +38,8 @@ static void print_helper_msg()
            "N, K, Ho, Wo]\n"
         << "                 1: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, "
            "N, Ho, Wo, K]\n"
+        << "                 2: Input[N, Hi, Wi, G, C], Weight[G, K, Y, X, C], Output[N, "
+           "Ho, Wo, G, K]\n"
         << "arg4: verification (0: no, 1: yes)\n"
         << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
         << "arg6: print tensor value (0: no; 1: yes)\n"
@@ -80,17 +83,7 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
     using F16  = ck::half_t;
     using BF16 = ck::bhalf_t;

-    using GNWC   = ck::tensor_layout::convolution::GNWC;
-    using GNHWC  = ck::tensor_layout::convolution::GNHWC;
-    using GNDHWC = ck::tensor_layout::convolution::GNDHWC;
-    using GKXC   = ck::tensor_layout::convolution::GKXC;
-    using GKYXC  = ck::tensor_layout::convolution::GKYXC;
-    using GKZYXC = ck::tensor_layout::convolution::GKZYXC;
-    using GNWK   = ck::tensor_layout::convolution::GNWK;
-    using GNHWK  = ck::tensor_layout::convolution::GNHWK;
-    using GNDHWK = ck::tensor_layout::convolution::GNDHWK;
+    using namespace ck::tensor_layout::convolution;

     constexpr auto I1 = ck::Number<1>{};
     constexpr auto I2 = ck::Number<2>{};
@@ -157,6 +150,22 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
             return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, F32{}, BF16{});
         }
     }
+    else if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
+    {
+        if(data_type == ConvDataType::F32_F32_F32)
+        {
+            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F32{}, F32{}, F32{});
+        }
+        else if(data_type == ConvDataType::F16_F16_F16)
+        {
+            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, F16{}, F16{}, F16{});
+        }
+        else if(data_type == ConvDataType::BF16_F32_BF16)
+        {
+            // fp32 atomic add is used for weight tensor in bf16 kernel
+            return profile(I2, NHWGC{}, GKYXC{}, NHWGK{}, BF16{}, F32{}, BF16{});
+        }
+    }
     else if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
     {
         if(data_type == ConvDataType::F32_F32_F32)
@@ -173,6 +182,22 @@ int profile_grouped_conv_bwd_weight(int argc, char* argv[])
             return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, F32{}, BF16{});
         }
     }
+    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_GKYXC_NHWGK)
+    {
+        if(data_type == ConvDataType::F32_F32_F32)
+        {
+            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F32{}, F32{}, F32{});
+        }
+        else if(data_type == ConvDataType::F16_F16_F16)
+        {
+            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, F16{}, F16{}, F16{});
+        }
+        else if(data_type == ConvDataType::BF16_F32_BF16)
+        {
+            // fp32 atomic add is used for weight tensor in bf16 kernel
+            return profile(I3, NDHWGC{}, GKZYXC{}, NDHWGK{}, BF16{}, F32{}, BF16{});
+        }
+    }

     std::cout << "this data_type & layout is not implemented" << std::endl;
```
script/profile_batched_gemm.sh

```diff
@@ -3,13 +3,6 @@
 ## GPU visibility
 export HIP_VISIBLE_DEVICES=0

 DRIVER="../build/bin/ckProfiler"
-OP=$1
-DATATYPE=$2
-LAYOUT=$3
-VERIFY=$4
-INIT=$5
-LOG=$6
-TIME=$7
 OP=$1
 DATATYPE=$2
```
test/batched_gemm_multi_d/CMakeLists.txt
View file @
50540084
# TODO: Enable for gfx90a after complier fix
# TODO: Enable for gfx90a after complier fix
if
(
NOT GPU_TARGETS MATCHES
"gfx90a"
)
if
(
DL_KERNELS
)
add_gtest_executable
(
test_batched_gemm_multi_d test_batched_gemm_multi_d.cpp
)
add_gtest_executable
(
test_batched_gemm_multi_d test_batched_gemm_multi_d.cpp
)
target_link_libraries
(
test_batched_gemm_multi_d PRIVATE utility device_batched_gemm_multi_d_instance
)
target_link_libraries
(
test_batched_gemm_multi_d PRIVATE utility device_batched_gemm_multi_d_instance
)
endif
()
endif
()
test/batched_gemm_multi_d/test_batched_gemm_multi_d.cpp

```diff
@@ -68,7 +68,9 @@ using KernelTypes = ::testing::Types<std::tuple<Row, Row, Row>,
 } // namespace

 TYPED_TEST_SUITE(TestBatchedGemmMultiD, KernelTypes);

+#ifdef __fp16
 TYPED_TEST(TestBatchedGemmMultiD, f16) { this->template Run<F16>(); }
+#endif
+#ifdef __int8__
 TYPED_TEST(TestBatchedGemmMultiD, int8) { this->template Run<int8_t>(); }
+#endif
```
test/block_swizzle_test/block_swizzle_test.cpp (new file, 0 → 100644)

```cpp
#include <stdio.h>
#include <string>
#include <algorithm>
#include <vector>
#include <limits>

#include "simple_args.h"

simple_args_t create_arg(int argc, char** argv)
{
    simple_args_t args;
    args.insert("m", "1024", "matrix m")
        .insert("n", "1024", "matrix n")
        .insert("k", "1024", "matrix k")
        .insert("m_per_block", "128", "m_per_block")
        .insert("n_per_block", "128", "n_per_block")
        .insert("k_per_block", "32", "k_per_block")
        .insert("num_cu", "104", "num cu")
        .insert("occupancy", "2", "occupancy")
        .parse(argc, argv);
    return args;
}

namespace impl {
template <typename T>
T integer_divide_ceil(T n, T d)
{
    return (n + d - 1) / d;
}
template <typename T>
T min(T a, T b)
{
    return a > b ? b : a;
}
template <typename T>
T max(T a, T b)
{
    return a > b ? a : b;
}
} // namespace impl

struct block_dispatcher_t
{
    public:
    uint32_t m_per_block;
    uint32_t n_per_block;
    uint32_t k_per_block;
    uint32_t num_cu;
    uint32_t occupancy;
    uint32_t m;
    uint32_t n;
    uint32_t k;

    //--------------------------------------
    uint32_t sk_num_blocks;
    uint32_t sk_num_big_blocks;
    uint32_t sk_total_iters;
    // uint32_t sk_num_blocks_per_tile; // how many

    uint32_t dp_start_block_idx;
    uint32_t dp_iters_per_block;
    uint32_t dp_num_blocks;

    uint32_t k_iters_per_tile;
    uint32_t k_iters_per_big_block;
    //--------------------------------------

    static constexpr uint32_t min_k_iters_per_sk_block = 1;

    void dump()
    {
        printf("%dx%dx%d(%dx%dx%d), cu:%d, occ:%d, grids:%d, sk_num_big_blocks:%d, "
               "sk_num_blocks:%d, sk_total_iters:%d, dp_start_block_idx:%d, dp_iters_per_block:%d, "
               "dp_num_blocks:%d, k_iters_per_tile:%d, k_iters_per_big_block:%d\n",
               m, n, k, m_per_block, n_per_block, k_per_block, num_cu, occupancy,
               get_grid_dims_x(), sk_num_big_blocks, sk_num_blocks, sk_total_iters,
               dp_start_block_idx, dp_iters_per_block, dp_num_blocks, k_iters_per_tile,
               k_iters_per_big_block);
    }

    block_dispatcher_t(uint32_t m_per_block_,
                       uint32_t n_per_block_,
                       uint32_t k_per_block_,
                       uint32_t num_cu_,
                       uint32_t occupancy_,
                       uint32_t m_,
                       uint32_t n_,
                       uint32_t k_)
        : m_per_block(m_per_block_),
          n_per_block(n_per_block_),
          k_per_block(k_per_block_),
          num_cu(num_cu_),
          occupancy(occupancy_),
          m(m_),
          n(n_),
          k(k_)
    {
        init();
    }

    uint32_t get_grid_dims_x() { return dp_start_block_idx + dp_num_blocks; }

    uint32_t get_block_idx(uint32_t bid)
    {
        // block id is linearily allocated along sk blocks (dp blocks are fine)
        // this function will compute blockIdx.x and the linear sk block mapping
        // uint32_t block_idx = 0;
        // if(bid < sk_num_big_blocks) {
        //     uint32_t current_k_iter = bid * k_iters_per_big_block;
        //     tile_idx = current_k_iter / k_iters_per_tile;
        // }
        return bid;
    }

    uint32_t get_current_itr(uint32_t block_idx)
    {
        uint32_t current_itr = 0;
        if(block_idx < sk_num_big_blocks)
        {
            current_itr = block_idx * k_iters_per_big_block;
        }
        else if(block_idx < sk_num_blocks)
        {
            current_itr = (sk_num_big_blocks * k_iters_per_big_block) +
                          (block_idx - sk_num_big_blocks) * (k_iters_per_big_block - 1);
        }
        else if(block_idx >= dp_start_block_idx)
        {
            current_itr = sk_total_iters + (block_idx - dp_start_block_idx) * dp_iters_per_block;
        }
        return current_itr;
    }

    void get_block_itr(uint32_t block_idx, uint32_t& iter_start, uint32_t& iter_end)
    {
        if(block_idx < sk_num_big_blocks)
        {
            iter_start = block_idx * k_iters_per_big_block;
            iter_end   = iter_start + k_iters_per_big_block;
        }
        else if(block_idx < sk_num_blocks)
        {
            iter_start = (sk_num_big_blocks * k_iters_per_big_block) +
                         (block_idx - sk_num_big_blocks) * (k_iters_per_big_block - 1);
            iter_end = iter_start + (k_iters_per_big_block - 1);
        }
        else if(block_idx >= dp_start_block_idx)
        {
            iter_start = sk_total_iters + (block_idx - dp_start_block_idx) * dp_iters_per_block;
            iter_end   = iter_start + dp_iters_per_block;
        }
    }

    private:
    void init()
    {
        uint32_t num_tiles =
            impl::integer_divide_ceil(m, m_per_block) * impl::integer_divide_ceil(n, n_per_block);
        k_iters_per_tile = impl::integer_divide_ceil(k, k_per_block);

        // one cu can hold one wg at one time, from the whole chip's point of view
        // if number of wg is same as num_cu, we call it 1 dispatch
        // if number of wg is 2x num_cu, we call it 2 dispatches.
        // one dispatch can deliever wg same as num_cu (full dispatch), or less than num_cu (partial
        // dispatch)
        //
        uint32_t full_dispatches         = num_tiles / num_cu;
        uint32_t full_dispatch_tiles     = full_dispatches * num_cu;
        uint32_t partial_dispatche_tiles = num_tiles - full_dispatch_tiles;

        uint32_t sk_occupancy = occupancy;
        uint32_t dp_tiles     = full_dispatch_tiles;
        uint32_t sk_tiles     = partial_dispatche_tiles;

        if(full_dispatches < occupancy)
        {
            // in this case, we allocate all blocks as sk blocks
            // sk_occupancy = occupancy - full_dispatches;
            sk_occupancy = 1; // TODO: single occ seems better
            dp_tiles     = full_dispatch_tiles;
            sk_tiles     = partial_dispatche_tiles;
        }
        else if((occupancy > 1) && (full_dispatches % occupancy == occupancy - 1))
        {
            // e.g. occupancy = 2, full_dispatches = 3, 5, 7 ...
            //      occupancy = 3, full_dispatches = 5, 8, 11 ...
            //      occupancy = 4, full_dispatches = 7, 11 ...
            sk_occupancy = 1; // left 1 slot for sk occupancy
            dp_tiles     = full_dispatch_tiles;
            sk_tiles     = partial_dispatche_tiles;
        }
        else
        {
            // others, we reduce 1 dispatch from dp, together with partial dispatch,
            // to construct sk dispatch
            sk_occupancy = occupancy - ((full_dispatches - 1) % occupancy);
            dp_tiles     = full_dispatch_tiles - num_cu;
            sk_tiles     = partial_dispatche_tiles + num_cu;
        }

        // dp_num_blocks = dp_tiles;
        // dp_start_block_idx = num_cu * sk_occupancy;
        dp_iters_per_block = k_iters_per_tile;
        sk_total_iters     = k_iters_per_tile * sk_tiles;

        // printf("num_tiles:%d, full_dispatches:%d, full_dispatch_tiles:%d,
        // partial_dispatche_tiles:%d\n",
        //     num_tiles, full_dispatches, full_dispatch_tiles, partial_dispatche_tiles);

        {
            uint32_t min_sk_tiles = (sk_tiles >= num_cu) ? num_cu : (sk_tiles + 1);
            uint32_t max_sk_tiles =
                (sk_tiles >= num_cu) ? num_cu * sk_occupancy
                                     : impl::min(num_cu, sk_total_iters / min_k_iters_per_sk_block);

            // if use dp for sk-block, how many iters do we need
            uint32_t dp_for_sk_iters = k_iters_per_tile;

            uint32_t best_sk_score = std::numeric_limits<int>::max();
            // we need to find the smallest sk iters
            for(uint32_t tentative_sk_blocks = min_sk_tiles; tentative_sk_blocks < max_sk_tiles;
                tentative_sk_blocks++)
            {
                uint32_t tentative_sk_iters_per_block =
                    (sk_total_iters + tentative_sk_blocks - 1) / tentative_sk_blocks;
                uint32_t tentative_sk_iters = tentative_sk_iters_per_block;
                uint32_t sk_blocks_per_tile = (tentative_sk_blocks + sk_tiles - 1) / sk_tiles;

                // TODO: carefully adjust this parameter
                // the more sk_blocks_per_tile, the worse the overhead
                uint32_t cross_sk_blocks_overhead = sk_blocks_per_tile;
                if(tentative_sk_blocks % sk_tiles != 0)
                {
                    // penalty for uneven divide
                    cross_sk_blocks_overhead +=
                        sk_blocks_per_tile * tentative_sk_iters_per_block / 50;
                }

                uint32_t tentative_sk_score = tentative_sk_iters + cross_sk_blocks_overhead;

                if(tentative_sk_score < best_sk_score)
                {
                    best_sk_score = tentative_sk_score;
                    sk_num_blocks = tentative_sk_blocks;
                }
            }

            if(best_sk_score >= dp_for_sk_iters)
            {
                sk_num_blocks = 0;
            }

            if(sk_num_blocks == 0)
            {
                sk_num_big_blocks     = 0;
                k_iters_per_big_block = 0;

                dp_num_blocks      = num_tiles; // all tile to be dp block
                dp_start_block_idx = 0;
                sk_total_iters     = 0; // clear this tiles
            }
            else
            {
                uint32_t k_iters_per_sk_block = sk_total_iters / sk_num_blocks;
                sk_num_big_blocks     = sk_total_iters - k_iters_per_sk_block * sk_num_blocks;
                k_iters_per_big_block = k_iters_per_sk_block + 1;

                dp_num_blocks      = dp_tiles;
                dp_start_block_idx = (sk_num_blocks + num_cu - 1) / num_cu * num_cu;
            }
        }
    }
};

struct tile_work_t
{
    uint32_t tile_idx;
    uint32_t iter_begin;
    uint32_t k_begin;
    uint32_t k_end;
    uint32_t k_iters_remaining;
};

int main(int argc, char** argv)
{
    simple_args_t arg = create_arg(argc, argv);
    block_dispatcher_t block_dispatcher{arg.get_uint32("m_per_block"),
                                        arg.get_uint32("n_per_block"),
                                        arg.get_uint32("k_per_block"),
                                        arg.get_uint32("num_cu"),
                                        arg.get_uint32("occupancy"),
                                        arg.get_uint32("m"),
                                        arg.get_uint32("n"),
                                        arg.get_uint32("k")};
    block_dispatcher.dump();

    // simulate actual kernel launch
    uint32_t dim_x = block_dispatcher.get_grid_dims_x();
    uint32_t total_k_iters =
        impl::integer_divide_ceil(arg.get_uint32("k"), arg.get_uint32("k_per_block"));
    uint32_t num_tiles =
        impl::integer_divide_ceil(arg.get_uint32("m"), arg.get_uint32("m_per_block")) *
        impl::integer_divide_ceil(arg.get_uint32("n"), arg.get_uint32("n_per_block"));
    std::vector<int> valid_tile_record(num_tiles * total_k_iters);

    for(uint32_t bid = 0; bid < dim_x; bid++)
    {
        uint32_t block_idx = block_dispatcher.get_block_idx(bid);
        bool is_sk_block   = block_idx < (block_dispatcher.sk_num_blocks);
        bool is_dp_block   = block_idx >= block_dispatcher.dp_start_block_idx;

        uint32_t iter_start, iter_end;
        block_dispatcher.get_block_itr(block_idx, iter_start, iter_end);
        uint32_t total_iter_length = iter_end - iter_start;

        while(true)
        {
            uint32_t iter_length_mod = iter_end % block_dispatcher.k_iters_per_tile;
            uint32_t current_iter_length =
                impl::min(iter_length_mod == 0 ? (iter_end - iter_start) : iter_length_mod,
                          total_iter_length);
            uint32_t tile_idx = (iter_end - 1) / block_dispatcher.k_iters_per_tile;
            uint32_t tile_iter_start =
                ((iter_end - 1) % block_dispatcher.k_iters_per_tile) - current_iter_length + 1;
            if(is_sk_block)
            {
                printf("[sk_block] bid:%3d, block_idx:%3d, tile_idx:%3d, iter_start:%d(%d | %d), "
                       "iter_end:%d (len:%d)\n",
                       bid, block_idx, tile_idx, iter_end - current_iter_length,
                       tile_iter_start, iter_start, iter_end, current_iter_length);
            }
            else if(is_dp_block)
            {
                printf("[dp_block] bid:%3d, block_idx:%3d, tile_idx:%3d, iter_start:%d(%d | %d), "
                       "iter_end:%d (len:%d)\n",
                       bid, block_idx, tile_idx, iter_end - current_iter_length,
                       tile_iter_start, iter_start, iter_end, current_iter_length);
            }
            else
            {
                printf("[other   ] bid:%3d, block_idx:%3d\n", bid, block_idx);
            }

            // some validation check
            for(auto i = iter_end - current_iter_length; i < iter_end; i++)
            {
                if(i >= valid_tile_record.size())
                {
                    printf("unexpected, current iter:%d larger than max:%d\n",
                           i, valid_tile_record.size());
                    return -1;
                }
                valid_tile_record[i] = 1;
            }

            iter_end -= current_iter_length;
            if(iter_end <= iter_start)
                break;
        }
    }

    int untouched = 0;
    for(auto i = 0; i < valid_tile_record.size(); i++)
    {
        if(valid_tile_record[i] != 1)
        {
            printf("untouched at %d (%d)\n", i, valid_tile_record.size());
            untouched++;
        }
    }
    printf("untouched %d/%d, %s\n", untouched, valid_tile_record.size(),
           untouched == 0 ? "valid" : "fail");
}
```
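The heart of `block_dispatcher_t::init()` is the even-split-plus-remainder partition of stream-k iterations: divide `sk_total_iters` evenly over `sk_num_blocks`, and promote the remainder to "big" blocks that each run one extra K iteration. A self-contained sketch of just that formula, with assumed inputs chosen for illustration (not taken from a real run):

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the stream-k split in block_dispatcher_t::init(): distribute
// sk_total_iters over sk_num_blocks; the division remainder becomes "big"
// blocks that each cover one more K iteration than the rest.
int main()
{
    uint32_t sk_total_iters = 352; // assumed: e.g. 11 sk tiles * 32 K-iters per tile
    uint32_t sk_num_blocks  = 104; // assumed: e.g. one sk block per CU

    uint32_t k_iters_per_sk_block = sk_total_iters / sk_num_blocks;            // 3
    uint32_t sk_num_big_blocks =
        sk_total_iters - k_iters_per_sk_block * sk_num_blocks;                 // 40
    uint32_t k_iters_per_big_block = k_iters_per_sk_block + 1;                 // 4

    // Sanity check: 40 big blocks * 4 iters + 64 normal blocks * 3 iters == 352.
    uint32_t covered = sk_num_big_blocks * k_iters_per_big_block +
                       (sk_num_blocks - sk_num_big_blocks) * k_iters_per_sk_block;
    printf("big:%u normal:%u covered:%u/%u\n",
           sk_num_big_blocks, sk_num_blocks - sk_num_big_blocks, covered, sk_total_iters);
}
```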
test/block_swizzle_test/rebuild.sh (new file, 0 → 100644)

```sh
CC=g++
$CC -Wall -std=c++17 -Iinclude -O3 block_swizzle_test.cpp -o block_swizzle_test.exe
\ No newline at end of file
```
test/block_swizzle_test/simple_args.h (new file, 0 → 100644)

```cpp
#pragma once

#include <iomanip>
#include <iostream>
#include <stdlib.h>
#include <string>
#include <unordered_map>
#include <vector>
#include <assert.h>

struct arg_content_t
{
    std::string name; // key
    std::string value;
    std::string help_text;
};

class simple_args_t
{
    public:
    simple_args_t() {}
    simple_args_t& insert(const std::string& name_,
                          const std::string& default_value_,
                          const std::string& help_text_)
    {
        arg_content_t arg{name_, default_value_, help_text_};
        if(arg_map.count(arg.name) != 0)
        {
            std::cout << "arg:" << arg.name << "already exist" << std::endl;
        }
        else
        {
            arg_map[arg.name] = arg;
        }
        return *this;
    }

    void usage()
    {
        for(auto& content : arg_map)
        {
            std::vector<std::string> help_text_lines;
            size_t pos = 0;
            for(size_t next_pos = content.second.help_text.find('\n', pos);
                next_pos != std::string::npos;)
            {
                help_text_lines.push_back(
                    std::string(content.second.help_text.begin() + pos,
                                content.second.help_text.begin() + next_pos++));
                pos      = next_pos;
                next_pos = content.second.help_text.find('\n', pos);
            }
            help_text_lines.push_back(std::string(content.second.help_text.begin() + pos,
                                                  content.second.help_text.end()));

            int arg_name_width = 16 - content.second.name.length();
            arg_name_width     = arg_name_width > 0 ? arg_name_width : 2;
            std::cout << std::setw(4) << "-" << content.second.name << std::setw(arg_name_width)
                      << " " << help_text_lines[0] << std::endl;

            for(auto help_next_line = std::next(help_text_lines.begin());
                help_next_line != help_text_lines.end();
                ++help_next_line)
            {
                std::cout << std::setw(28) << " " << *help_next_line << std::endl;
            }
        }
    }

    bool parse(int argc, char* argv[], int start_index = 1)
    {
        if(argc <= start_index)
        {
            // std::cout << "not enough args (" << argc << ") with starting index " << start_index
            //           << std::endl;
            return true;
        }
        for(int i = start_index; i < argc; i++)
        {
            std::string cur_arg = std::string(argv[i]);
            if(cur_arg[0] != '-')
            {
                std::cout << "illegal input" << std::endl;
                usage();
                return false;
            }
            else if(cur_arg[0] == '-' && cur_arg[1] == '?')
            {
                usage();
                return false;
            }
            else
            {
                size_t found_equal = cur_arg.find('=');
                if(found_equal == std::string::npos || found_equal == (cur_arg.length() - 1))
                {
                    std::cout << "failed while parsing \"" << cur_arg << "\", "
                              << "arg must be in the form \"-name=value\"" << std::endl;
                    return false;
                }
                std::string arg_name  = cur_arg.substr(1, found_equal - 1);
                std::string arg_value = cur_arg.substr(found_equal + 1);
                if(arg_map.count(arg_name) == 0)
                {
                    std::cout << "no such arg \"" << arg_name << "\" registered" << std::endl;
                    return false;
                }
                arg_map[arg_name].value = arg_value;
            }
        }
        return true;
    }

    std::string get(const std::string& name) const { return get_str(name); }
    std::string get_str(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        std::string value = arg_map.at(name).value;
        return value;
    }
    int get_int(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        int value = atoi(arg_map.at(name).value.c_str());
        return value;
    }
    uint32_t get_uint32(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        uint32_t value = strtoul(arg_map.at(name).value.c_str(), nullptr, 10);
        return value;
    }
    uint64_t get_uint64(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        uint64_t value = strtoull(arg_map.at(name).value.c_str(), nullptr, 10);
        return value;
    }
    double get_double(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        double value = atof(arg_map.at(name).value.c_str());
        return value;
    }
    float get_float(const std::string& name) const
    {
        assert(arg_map.count(name) != 0);
        float value = atof(arg_map.at(name).value.c_str());
        return value;
    }

    private:
    std::unordered_map<std::string, arg_content_t> arg_map;
};
```
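A minimal usage sketch for `simple_args_t`, mirroring `create_arg()` in block_swizzle_test.cpp; the argument names here are illustrative, and the header is assumed to be on the include path:

```cpp
// Register defaults, parse "-name=value" pairs, then read typed values.
// Passing "-?" makes parse() print usage() and return false.
#include <cstdint>
#include <cstdio>
#include "simple_args.h"

int main(int argc, char** argv)
{
    simple_args_t args;
    args.insert("m", "1024", "matrix m")
        .insert("n_per_block", "128", "n_per_block")
        .parse(argc, argv); // e.g. ./a.out -m=2048 -n_per_block=256

    uint32_t m = args.get_uint32("m"); // 2048 if overridden above, else 1024
    printf("m=%u n_per_block=%u\n", m, args.get_uint32("n_per_block"));
    return 0;
}
```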
test/gemm/CMakeLists.txt

```diff
 if(DTYPES MATCHES "fp32" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_fp32 gemm_fp32.cpp)
     target_link_libraries(test_gemm_fp32 PRIVATE utility)
     target_link_libraries(test_gemm_fp32 PRIVATE device_gemm_instance)
 endif()
 if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_fp16 gemm_fp16.cpp)
     target_link_libraries(test_gemm_fp16 PRIVATE utility)
     target_link_libraries(test_gemm_fp16 PRIVATE device_gemm_instance)
-    add_test_executable(test_gemm_bf16 gemm_bf16.cpp)
-    target_link_libraries(test_gemm_bf16 PRIVATE utility)
-    target_link_libraries(test_gemm_bf16 PRIVATE device_gemm_instance)
-    add_test_executable(test_gemm_int8 gemm_int8.cpp)
-    target_link_libraries(test_gemm_int8 PRIVATE utility)
-    target_link_libraries(test_gemm_int8 PRIVATE device_gemm_instance)
     add_library(gemm_standalone_xdl_fp16_instances STATIC
         instance/gemm_f16_nn_instance.cpp
         instance/gemm_f16_nt_instance.cpp
@@ -24,3 +17,14 @@ add_library(gemm_standalone_xdl_fp16_instances STATIC
     add_test_executable(test_gemm_standalone_xdl_fp16 gemm_standalone_xdl_fp16.cpp)
     target_link_libraries(test_gemm_standalone_xdl_fp16 PRIVATE gemm_standalone_xdl_fp16_instances utility)
     target_include_directories(test_gemm_standalone_xdl_fp16 PRIVATE instance/)
 endif()
+if(DTYPES MATCHES "bf16" OR NOT DEFINED DTYPES)
+    add_test_executable(test_gemm_bf16 gemm_bf16.cpp)
+    target_link_libraries(test_gemm_bf16 PRIVATE utility)
+    target_link_libraries(test_gemm_bf16 PRIVATE device_gemm_instance)
+endif()
+if(DTYPES MATCHES "int8" OR NOT DEFINED DTYPES)
+    add_test_executable(test_gemm_int8 gemm_int8.cpp)
+    target_link_libraries(test_gemm_int8 PRIVATE utility)
+    target_link_libraries(test_gemm_int8 PRIVATE device_gemm_instance)
+endif()
\ No newline at end of file
```
test/grouped_convnd_bwd_data/CMakeLists.txt

```diff
 if(GPU_TARGETS MATCHES "gfx908" OR GPU_TARGETS MATCHES "gfx90a" OR GPU_TARGETS MATCHES "gfx940")
     add_gtest_executable(test_grouped_convnd_bwd_data test_grouped_convnd_bwd_data.cpp)
-    target_link_libraries(test_grouped_convnd_bwd_data PRIVATE utility device_grouped_conv2d_bwd_data_instance)
+    target_link_libraries(test_grouped_convnd_bwd_data PRIVATE utility device_grouped_conv2d_bwd_data_instance device_grouped_conv3d_bwd_data_instance)
     add_gtest_executable(test_grouped_convnd_bwd_data_interface test_grouped_convnd_bwd_data_interface.cpp)
     target_link_libraries(test_grouped_convnd_bwd_data_interface PRIVATE utility device_grouped_conv2d_bwd_data_instance)
 endif()
\ No newline at end of file
```
test/grouped_convnd_bwd_data/test_grouped_convnd_bwd_data.cpp

The first hunk (@@ -46,23 +46,36 @@) replaces the per-layout `using` aliases with a namespace import and splits the single type list and test fixture into 2-d and 3-d variants; the resulting code:

```cpp
using namespace ck::tensor_layout::convolution;

using KernelTypes2d = ::testing::Types<std::tuple<float, GNHWK, GKYXC, GNHWC>,
                                       std::tuple<ck::half_t, GNHWK, GKYXC, GNHWC>,
                                       std::tuple<ck::bhalf_t, GNHWK, GKYXC, GNHWC>,
                                       std::tuple<float, NHWGK, GKYXC, NHWGC>,
                                       std::tuple<ck::half_t, NHWGK, GKYXC, NHWGC>,
                                       std::tuple<ck::bhalf_t, NHWGK, GKYXC, NHWGC>>;
using KernelTypes3d = ::testing::Types<std::tuple<float, GNDHWK, GKZYXC, GNDHWC>,
                                       std::tuple<ck::half_t, GNDHWK, GKZYXC, GNDHWC>,
                                       std::tuple<ck::bhalf_t, GNDHWK, GKZYXC, GNDHWC>,
                                       std::tuple<float, NDHWGK, GKZYXC, NDHWGC>,
                                       std::tuple<ck::half_t, NDHWGK, GKZYXC, NDHWGC>,
                                       std::tuple<ck::bhalf_t, NDHWGK, GKZYXC, NDHWGC>>;

template <typename Tuple>
class TestGroupedConvndBwdData2d : public TestGroupedConvndBwdData<Tuple>
{
};

template <typename Tuple>
class TestGroupedConvndBwdData3d : public TestGroupedConvndBwdData<Tuple>
{
};

TYPED_TEST_SUITE(TestGroupedConvndBwdData2d, KernelTypes2d);
TYPED_TEST_SUITE(TestGroupedConvndBwdData3d, KernelTypes3d);

TYPED_TEST(TestGroupedConvndBwdData2d, Test2D)
{
    this->conv_params.clear();
```

```diff
@@ -76,3 +89,15 @@ TYPED_TEST(TestGroupedConvndBwdData2d, Test2D)
         {2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
     this->template Run<2>();
 }
+
+TYPED_TEST(TestGroupedConvndBwdData3d, Test3D)
+{
+    this->conv_params.clear();
+    this->conv_params.push_back(
+        {3, 2, 16, 128, 256, {1, 1, 1}, {7, 7, 7}, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
+    this->conv_params.push_back(
+        {3, 2, 2, 128, 256, {3, 3, 3}, {14, 14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
+    this->conv_params.push_back(
+        {3, 2, 32, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
+    this->template Run<3>();
+}
```
View file @
50540084
...
@@ -2,8 +2,10 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
...
@@ -2,8 +2,10 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
set
(
target 0
)
set
(
target 0
)
foreach
(
gpu IN LISTS GPU_TARGETS
)
foreach
(
gpu IN LISTS GPU_TARGETS
)
if
(
gpu IN_LIST gpu_list AND target EQUAL 0
)
if
(
gpu IN_LIST gpu_list AND target EQUAL 0
)
add_gtest_executable
(
test_grouped_convnd_bwd_weight grouped_convnd_bwd_weight.cpp
)
add_gtest_executable
(
test_grouped_convnd_bwd_weight
test_
grouped_convnd_bwd_weight.cpp
)
target_link_libraries
(
test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance
)
target_link_libraries
(
test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance
)
add_gtest_executable
(
test_grouped_convnd_bwd_weight_interface test_grouped_convnd_bwd_weight_interface.cpp
)
target_link_libraries
(
test_grouped_convnd_bwd_weight_interface PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance
)
set
(
target 1
)
set
(
target 1
)
endif
()
endif
()
endforeach
()
endforeach
()
\ No newline at end of file
test/grouped_convnd_bwd_weight/grouped_convnd_bwd_weight.cpp → test/grouped_convnd_bwd_weight/test_grouped_convnd_bwd_weight.cpp (renamed)

```diff
@@ -9,64 +9,101 @@
 #include <gtest/gtest.h>

+#include "ck/utility/common_header.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 #include "profiler/profile_grouped_conv_bwd_weight_impl.hpp"

 template <typename Tuple>
 class TestGroupedConvndBwdWeight : public ::testing::Test
 {
     protected:
-    using DataType = std::tuple_element_t<0, Tuple>;
+    using InDataType  = std::tuple_element_t<0, Tuple>;
+    using WeiDataType = std::tuple_element_t<1, Tuple>;
+    using OutDataType = std::tuple_element_t<2, Tuple>;
+    using InLayout    = std::tuple_element_t<3, Tuple>;
+    using WeiLayout   = std::tuple_element_t<4, Tuple>;
+    using OutLayout   = std::tuple_element_t<5, Tuple>;
+    using NDimSpatial = std::tuple_element_t<6, Tuple>;

     std::vector<ck::utils::conv::ConvParam> conv_params;
     ck::index_t split_k{2};

-    template <ck::index_t NDimSpatial>
     void Run()
     {
+        EXPECT_FALSE(conv_params.empty());
+        bool pass = true;
         for(auto& param : conv_params)
         {
-            bool pass;
-            pass = ck::profiler::profile_grouped_conv_bwd_weight_impl<
-                NDimSpatial,
-                ck::tuple_element_t<NDimSpatial - 1,
-                                    ck::Tuple<ck::tensor_layout::convolution::GNWC,
-                                              ck::tensor_layout::convolution::GNHWC,
-                                              ck::tensor_layout::convolution::GNDHWC>>,
-                ck::tuple_element_t<NDimSpatial - 1,
-                                    ck::Tuple<ck::tensor_layout::convolution::GKXC,
-                                              ck::tensor_layout::convolution::GKYXC,
-                                              ck::tensor_layout::convolution::GKZYXC>>,
-                ck::tuple_element_t<NDimSpatial - 1,
-                                    ck::Tuple<ck::tensor_layout::convolution::GNWK,
-                                              ck::tensor_layout::convolution::GNHWK,
-                                              ck::tensor_layout::convolution::GNDHWK>>,
-                DataType,
-                DataType,
-                DataType>(true,  // do_verification
-                          1,     // init_method: integer value
-                          false, // do_log
-                          false, // time_kernel
-                          param,
-                          split_k);
-            EXPECT_TRUE(pass);
+            pass = pass && ck::profiler::profile_grouped_conv_bwd_weight_impl<NDimSpatial{},
+                                                                              InLayout,
+                                                                              WeiLayout,
+                                                                              OutLayout,
+                                                                              InDataType,
+                                                                              WeiDataType,
+                                                                              OutDataType>(
+                true,  // do_verification
+                1,     // init_method: integer value
+                false, // do_log
+                false, // time_kernel
+                param,
+                split_k);
         }
+        EXPECT_TRUE(pass);
     }
 };

-using KernelTypes = ::testing::Types<std::tuple<float>,
-                                     std::tuple<ck::half_t>,
-                                     std::tuple<ck::bhalf_t>>;
-
-TYPED_TEST_SUITE(TestGroupedConvndBwdWeight, KernelTypes);
+template <typename Tuple>
+class TestGroupedConvndBwdWeight1d : public TestGroupedConvndBwdWeight<Tuple>
+{
+};
+template <typename Tuple>
+class TestGroupedConvndBwdWeight2d : public TestGroupedConvndBwdWeight<Tuple>
+{
+};
+template <typename Tuple>
+class TestGroupedConvndBwdWeight3d : public TestGroupedConvndBwdWeight<Tuple>
+{
+};
+
+using namespace ck::tensor_layout::convolution;
+
+using KernelTypes1d =
+    ::testing::Types<std::tuple<float, float, float, GNWC, GKXC, GNWK, ck::Number<1>>,
+                     std::tuple<ck::half_t, ck::half_t, ck::half_t, GNWC, GKXC, GNWK, ck::Number<1>>,
+                     std::tuple<ck::bhalf_t, float, ck::bhalf_t, GNWC, GKXC, GNWK, ck::Number<1>>>;
+using KernelTypes2d =
+    ::testing::Types<std::tuple<float, float, float, GNHWC, GKYXC, GNHWK, ck::Number<2>>,
+                     std::tuple<ck::half_t, ck::half_t, ck::half_t, GNHWC, GKYXC, GNHWK, ck::Number<2>>,
+                     std::tuple<ck::bhalf_t, float, ck::bhalf_t, GNHWC, GKYXC, GNHWK, ck::Number<2>>,
+                     std::tuple<float, float, float, NHWGC, GKYXC, NHWGK, ck::Number<2>>,
+                     std::tuple<ck::half_t, ck::half_t, ck::half_t, NHWGC, GKYXC, NHWGK, ck::Number<2>>,
+                     std::tuple<ck::bhalf_t, float, ck::bhalf_t, NHWGC, GKYXC, NHWGK, ck::Number<2>>>;
+using KernelTypes3d =
+    ::testing::Types<std::tuple<float, float, float, GNDHWC, GKZYXC, GNDHWK, ck::Number<3>>,
+                     std::tuple<ck::half_t, ck::half_t, ck::half_t, GNDHWC, GKZYXC, GNDHWK, ck::Number<3>>,
+                     std::tuple<ck::bhalf_t, float, ck::bhalf_t, GNDHWC, GKZYXC, GNDHWK, ck::Number<3>>,
+                     std::tuple<float, float, float, NDHWGC, GKZYXC, NDHWGK, ck::Number<3>>,
+                     std::tuple<ck::half_t, ck::half_t, ck::half_t, NDHWGC, GKZYXC, NDHWGK, ck::Number<3>>,
+                     std::tuple<ck::bhalf_t, float, ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK, ck::Number<3>>>;
+
+TYPED_TEST_SUITE(TestGroupedConvndBwdWeight1d, KernelTypes1d);
+TYPED_TEST_SUITE(TestGroupedConvndBwdWeight2d, KernelTypes2d);
+TYPED_TEST_SUITE(TestGroupedConvndBwdWeight3d, KernelTypes3d);

-TYPED_TEST(TestGroupedConvndBwdWeight, Test1D)
+TYPED_TEST(TestGroupedConvndBwdWeight1d, Test1D)
 {
     this->conv_params.clear();
     this->conv_params.push_back({1, 2, 128, 128, 256, {1}, {14}, {2}, {1}, {0}, {0}});
     this->conv_params.push_back({1, 2, 32, 128, 256, {3}, {28}, {1}, {1}, {1}, {1}});
     this->conv_params.push_back({1, 2, 128, 128, 256, {1}, {3}, {1}, {1}, {0}, {0}});
-    this->template Run<1>();
+    this->Run();
 }
@@ -75,10 +112,10 @@ TYPED_TEST(TestGroupedConvndBwdWeight, Test2D)
-TYPED_TEST(TestGroupedConvndBwdWeight, Test2D)
+TYPED_TEST(TestGroupedConvndBwdWeight2d, Test2D)
 {
     this->conv_params.clear();
     this->conv_params.push_back(
         {2, 2, 4, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
     this->conv_params.push_back(
         {2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
-    this->template Run<2>();
+    this->Run();
 }

-TYPED_TEST(TestGroupedConvndBwdWeight, Test3D)
+TYPED_TEST(TestGroupedConvndBwdWeight3d, Test3D)
 {
     this->conv_params.clear();
     this->conv_params.push_back(
@@ -87,5 +124,5 @@ TYPED_TEST(TestGroupedConvndBwdWeight, Test3D)
     this->conv_params.push_back(
         {3, 2, 2, 128, 256, {3, 3, 3}, {14, 14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
     this->conv_params.push_back(
         {3, 2, 32, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
-    this->template Run<3>();
+    this->Run();
 }
```
test/grouped_convnd_bwd_weight/test_grouped_convnd_bwd_weight_interface.cpp (new file, 0 → 100644)

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <tuple>
#include <vector>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/convolution_backward_weight_specialization.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_xdl_cshuffle.hpp"

#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"

#include <gtest/gtest.h>

using F16 = ck::half_t;
using F32 = float;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using ConvolutionBackwardWeightSpecialization =
    ck::tensor_operation::device::ConvolutionBackwardWeightSpecialization;

static constexpr auto ConvBwdWeightDefault = ConvolutionBackwardWeightSpecialization::Default;
static constexpr auto Filter1x1Stride1Pad0 =
    ConvolutionBackwardWeightSpecialization::Filter1x1Stride1Pad0;

template <typename Tuple, ConvolutionBackwardWeightSpecialization ConvSpec>
class TestGroupedConvndBwdWeight : public ::testing::Test
{
    protected:
    static constexpr ck::index_t NDimSpatial = 2;

    using InLayout  = std::tuple_element_t<2, Tuple>;
    using WeiLayout = std::tuple_element_t<1, Tuple>;
    using OutLayout = std::tuple_element_t<0, Tuple>;

    // clang-format off
    using GroupedConvBwdWeightDeviceInstance = ck::tensor_operation::device::DeviceGroupedConvBwdWeight_Xdl_CShuffle
        //##########| Num| InLayout| WeiLayout| OutLayout| InData| WeiData| OutData| AccData| In| Wei| Out| ConvBackward| Block| MPer| NPer| K0Per| K1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransfer| CBlockTransfer|
        //##########| Dim|         |          |          |   Type|    Type|    Type|    Type| Elementwise| Elementwise| Elementwise| Weight| Size| Block| Block| Block|   | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| ClusterLengths| ScalarPerVector|
        //##########| Spatial|     |          |          |       |        |        |        | Operation| Operation| Operation| Specialization| | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| MBlock_MPerBlock| NWaveNPerXdl|
        //##########|        |     |          |          |       |        |        |        |          |          |          |               | | | | | | | |     |     |                |             | | |          |             | |                 |             | | |          |             | |           |           | NBlock_NPerBlock|             |
        <NDimSpatial, InLayout, WeiLayout, OutLayout, F16, F16, F16, F32, PassThrough, PassThrough, PassThrough, ConvSpec, 128, 32, 128, 4, 8, 32, 32, 1, 2, S<1, 4, 4, 8>, S<0, 3, 1, 2>, S<0, 2, 1, 3>, 2, 8, 1, true, S<1, 4, 16, 2>, S<0, 3, 1, 2>, S<0, 2, 1, 3>, 2, 8, 4, true, 1, 1, S<1, 32, 1, 4>, 8>;
    // clang-format on

    ck::utils::conv::ConvParam conv_param;
    ck::index_t split_k{2};

    template <ck::index_t NDimSpatial>
    bool Run()
    {
        const auto in_g_n_c_wis_desc =
            ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(
                conv_param);
        const auto wei_g_k_c_xs_desc =
            ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(
                conv_param);
        const auto out_g_n_k_wos_desc =
            ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(
                conv_param);

        std::array<ck::index_t, NDimSpatial> input_spatial_lengths{};
        std::array<ck::index_t, NDimSpatial> filter_spatial_lengths{};
        std::array<ck::index_t, NDimSpatial> output_spatial_lengths{};
        std::array<ck::index_t, NDimSpatial + 3> input_strides{};
        std::array<ck::index_t, NDimSpatial + 3> output_strides{};
        std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
        std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
        std::array<ck::index_t, NDimSpatial> input_left_pads{};
        std::array<ck::index_t, NDimSpatial> input_right_pads{};

        auto range_copy = [](const auto& from, auto to) { std::copy(begin(from), end(from), to); };

        range_copy(conv_param.input_spatial_lengths_, begin(input_spatial_lengths));
        range_copy(conv_param.filter_spatial_lengths_, begin(filter_spatial_lengths));
        range_copy(conv_param.output_spatial_lengths_, begin(output_spatial_lengths));
        range_copy(in_g_n_c_wis_desc.GetStrides(), begin(input_strides));
        range_copy(out_g_n_k_wos_desc.GetStrides(), begin(output_strides));
        range_copy(conv_param.conv_filter_strides_, begin(conv_filter_strides));
        range_copy(conv_param.conv_filter_dilations_, begin(conv_filter_dilations));
        range_copy(conv_param.input_left_pads_, begin(input_left_pads));
        range_copy(conv_param.input_right_pads_, begin(input_right_pads));

        auto conv     = GroupedConvBwdWeightDeviceInstance{};
        auto argument = conv.MakeArgument(nullptr,
                                          nullptr,
                                          nullptr,
                                          conv_param.G_,
                                          conv_param.N_,
                                          conv_param.K_,
                                          conv_param.C_,
                                          input_spatial_lengths,
                                          filter_spatial_lengths,
                                          output_spatial_lengths,
                                          input_strides,
                                          output_strides,
                                          conv_filter_strides,
                                          conv_filter_dilations,
                                          input_left_pads,
                                          input_right_pads,
                                          PassThrough{},
                                          PassThrough{},
                                          PassThrough{},
                                          split_k);
        return conv.IsSupportedArgument(argument);
    }
};

using GNHWC = ck::tensor_layout::convolution::GNHWC;
using NHWGC = ck::tensor_layout::convolution::NHWGC;

using GKYXC = ck::tensor_layout::convolution::GKYXC;

using GNHWK = ck::tensor_layout::convolution::GNHWK;
using NHWGK = ck::tensor_layout::convolution::NHWGK;

using KernelTypes =
    ::testing::Types<std::tuple<GNHWK, GKYXC, GNHWC>, std::tuple<NHWGK, GKYXC, NHWGC>>;

template <typename Tuple>
class TestGroupedConvndBwdWeightDefault
    : public TestGroupedConvndBwdWeight<Tuple, ConvBwdWeightDefault>
{
};

template <typename Tuple>
class TestGroupedConvndBwdWeightFilter1x1
    : public TestGroupedConvndBwdWeight<Tuple, Filter1x1Stride1Pad0>
{
};

TYPED_TEST_SUITE(TestGroupedConvndBwdWeightDefault, KernelTypes);
TYPED_TEST_SUITE(TestGroupedConvndBwdWeightFilter1x1, KernelTypes);

TYPED_TEST(TestGroupedConvndBwdWeightFilter1x1, SpecializationCheck)
{
    // Check filter 3,3 instead of 1,1
    this->conv_param = {2, 2, 4, 192, 192, {3, 3}, {28, 28}, {1, 1}, {1, 1}, {0, 0}, {0, 0}};
    bool is_supported = this->template Run<2>();
    EXPECT_FALSE(is_supported);

    // Check strides 2,2 instead of 1,1
    this->conv_param = {2, 2, 4, 192, 192, {1, 1}, {28, 28}, {2, 2}, {1, 1}, {0, 0}, {0, 0}};
    is_supported     = this->template Run<2>();
    EXPECT_FALSE(is_supported);

    // Check with pad
    this->conv_param = {2, 2, 4, 192, 192, {1, 1}, {28, 28}, {1, 1}, {1, 1}, {1, 1}, {1, 1}};
    is_supported     = this->template Run<2>();
    EXPECT_FALSE(is_supported);

    // Supported version
    this->conv_param = {2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}};
    is_supported     = this->template Run<2>();
    EXPECT_TRUE(is_supported);
}

TYPED_TEST(TestGroupedConvndBwdWeightDefault, VectorLoadCheck)
{
    // vector load for A
    this->conv_param = {2, 2, 128, 129, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}};
    bool is_supported = this->template Run<2>();
    EXPECT_FALSE(is_supported);
    // vector load for B, E, Ds
    this->conv_param = {2, 2, 128, 128, 257, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}};
    is_supported      = this->template Run<2>();
    EXPECT_FALSE(is_supported);
}
```