gaoqiong / composable_kernel

Commit a1841d55, authored Aug 01, 2022 by Chao Liu

    Merge remote-tracking branch 'origin/develop' into lwpck-367

Parents: 127bf7f4, 500fa995 (373 changes in the merge)

Showing 20 changed files with 426 additions and 1589 deletions (+426, -1589)
profiler/src/profile_convnd_bwd_data.cpp          +0    -229
profiler/src/profile_convnd_bwd_weight.cpp        +0    -226
profiler/src/profile_convnd_fwd.cpp               +0    -359
profiler/src/profile_gemm.cpp                     +33   -27
profiler/src/profile_gemm_add_add_fastgelu.cpp    +16   -10
profiler/src/profile_gemm_bilinear.cpp            +14   -11
profiler/src/profile_grouped_conv_fwd.cpp         +258  -0
profiler/src/profile_grouped_gemm.cpp             +4    -4
profiler/src/profile_reduce.cpp                   +1    -1
profiler/src/profiler.cpp                         +13   -32
test/CMakeLists.txt                               +2    -2
test/batched_gemm/CMakeLists.txt                  +1    -1
test/batched_gemm_reduce/CMakeLists.txt           +1    -1
test/conv2d_bwd_data/CMakeLists.txt               +0    -3
test/conv2d_bwd_data/conv2d_bwd_data.cpp          +0    -330
test/conv2d_bwd_weight/CMakeLists.txt             +0    -2
test/conv2d_bwd_weight/conv2d_bwd_weight.cpp      +0    -217
test/conv_util/CMakeLists.txt                     +1    -1
test/conv_util/conv_util.cpp                      +80   -131
test/convnd_bwd_data/CMakeLists.txt               +2    -2
profiler/src/profile_convnd_bwd_data.cpp (deleted, 100644 → 0)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/include/profile_convnd_bwd_data_impl.hpp"

namespace {

enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};

enum struct ConvInputLayout
{
    NCHW, // 0
    NHWC, // 1
};

enum struct ConvWeightLayout
{
    KCYX, // 0
    KYXC, // 1
};

enum struct ConvOutputLayout
{
    NKHW, // 0
    NHWK, // 1
};

ck::utils::conv::ConvParams parse_conv_params(int num_dim_spatial, char* argv[], int arg_idx)
{
    // (N, K, C) + num_dim_spatial * 6 (filter, input, strides, dilations, pad left, pad right)
    ck::utils::conv::ConvParams params;

    params.num_dim_spatial_ = num_dim_spatial;
    params.N_               = std::stoi(argv[arg_idx++]);
    params.K_               = std::stoi(argv[arg_idx++]);
    params.C_               = std::stoi(argv[arg_idx++]);

    params.filter_spatial_lengths_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.filter_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_spatial_lengths_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
    }
    params.conv_filter_strides_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.conv_filter_strides_[i] = std::stoi(argv[arg_idx++]);
    }
    params.conv_filter_dilations_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.conv_filter_dilations_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_left_pads_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_left_pads_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_right_pads_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_right_pads_[i] = std::stoi(argv[arg_idx++]);
    }

    return params;
}

} // namespace
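As a quick illustration of the parsing convention documented in the comment above — a hedged sketch by the editor, with invented values, not part of the original file — a 2-D problem supplies 3 + 2 * 6 = 15 integers:

// Hypothetical sketch: builds the 15-integer argument list
// N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, RightPx
// and hands it to parse_conv_params with arg_idx = 0, since this array
// holds only the shape arguments (no program name or control flags).
static ck::utils::conv::ConvParams make_example_2d_params()
{
    char n[] = "128", k[] = "256", c[] = "192", y[] = "3", x[] = "3";
    char hi[] = "71", wi[] = "71", sy[] = "2", sx[] = "2", dy[] = "1", dx[] = "1";
    char lpy[] = "1", lpx[] = "1", rpy[] = "1", rpx[] = "1";
    char* args[] = {n, k, c, y, x, hi, wi, sy, sx, dy, dx, lpy, lpx, rpy, rpx};
    return parse_conv_params(/*num_dim_spatial=*/2, args, /*arg_idx=*/0);
}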
int profile_convnd_bwd_data(int argc, char* argv[], int num_dim_spatial)
{
    const int preParams = 10;
    int conv_args       = 3 + num_dim_spatial * 6;
    int cmdline_nargs   = conv_args + preParams;
    if(cmdline_nargs != argc)
    {
        printf("arg1: tensor operation (conv[1|2|3]d_bwd_data: BackwardConvolution)\n");
        printf("arg2: data type (0: fp32; 1: fp16)\n");
        printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
        printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
        printf("arg5: output tensor layout (0: NKHW; 1: NHWK)\n");
        printf("arg6: verification (0: no; 1: yes)\n");
        printf("arg7: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg8: print tensor value (0: no; 1: yes)\n");
        printf("arg9: time kernel (0=no, 1=yes)\n");
        printf("arg10 to 24: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
               "RightPx\n");
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto in_layout       = static_cast<ConvInputLayout>(std::stoi(argv[3]));
    const auto wei_layout      = static_cast<ConvWeightLayout>(std::stoi(argv[4]));
    const auto out_layout      = static_cast<ConvOutputLayout>(std::stoi(argv[5]));
    const bool do_verification = std::stoi(argv[6]);
    const int init_method      = std::stoi(argv[7]);
    const bool do_log          = std::stoi(argv[8]);
    const bool time_kernel     = std::stoi(argv[9]);

    ck::utils::conv::ConvParams params = parse_conv_params(num_dim_spatial, argv, preParams);

    auto Run = [&](auto input_type, auto wei_type, auto out_type, auto acc_type) {
        using InDataType  = decltype(input_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);
        using AccDataType = decltype(acc_type);

        switch(num_dim_spatial)
        {
        case 1:
            ck::profiler::profile_convnd_bwd_data_impl<1,
                                                       InDataType,
                                                       WeiDataType,
                                                       OutDataType,
                                                       AccDataType,
                                                       ck::tensor_layout::convolution::NWC,
                                                       ck::tensor_layout::convolution::KXC,
                                                       ck::tensor_layout::convolution::NWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_);
            break;
        case 2:
            ck::profiler::profile_convnd_bwd_data_impl<2,
                                                       InDataType,
                                                       WeiDataType,
                                                       OutDataType,
                                                       AccDataType,
                                                       ck::tensor_layout::convolution::NHWC,
                                                       ck::tensor_layout::convolution::KYXC,
                                                       ck::tensor_layout::convolution::NHWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_);
            break;
        case 3:
            ck::profiler::profile_convnd_bwd_data_impl<3,
                                                       InDataType,
                                                       WeiDataType,
                                                       OutDataType,
                                                       AccDataType,
                                                       ck::tensor_layout::convolution::NDHWC,
                                                       ck::tensor_layout::convolution::KZYXC,
                                                       ck::tensor_layout::convolution::NDHWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_);
            break;
        default: break;
        }
    };

    if(data_type == ConvDataType::F32_F32_F32 && in_layout == ConvInputLayout::NHWC &&
       wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(float{}, float{}, float{}, float{});
    }
    else if(data_type == ConvDataType::F16_F16_F16 && in_layout == ConvInputLayout::NHWC &&
            wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(ck::half_t{}, ck::half_t{}, ck::half_t{}, float{});
    }
    else if(data_type == ConvDataType::BF16_BF16_BF16 && in_layout == ConvInputLayout::NHWC &&
            wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(ck::bhalf_t{}, ck::bhalf_t{}, ck::bhalf_t{}, float{});
    }
    else if(data_type == ConvDataType::INT8_INT8_INT8 && in_layout == ConvInputLayout::NHWC &&
            wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(int8_t{}, int8_t{}, int8_t{}, int32_t{});
    }
    else
    {
        std::cout << "wrong! this Conv data_type & layout is not implemented" << std::endl;
        return 1;
    }

    return 0;
}
profiler/src/profile_convnd_bwd_weight.cpp (deleted, 100644 → 0)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/include/profile_convnd_bwd_weight_impl.hpp"

namespace {

enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
};

enum struct ConvInputLayout
{
    NCHW, // 0
    NHWC, // 1
};

enum struct ConvWeightLayout
{
    KCYX, // 0
    KYXC, // 1
};

enum struct ConvOutputLayout
{
    NKHW, // 0
    NHWK, // 1
};

ck::utils::conv::ConvParams parse_conv_params(int num_dim_spatial, char* argv[], int arg_idx)
{
    // (N, K, C) + num_dim_spatial * 6 (filter, input, strides, dilations, pad left, pad right)
    ck::utils::conv::ConvParams params;

    params.num_dim_spatial_ = num_dim_spatial;
    params.N_               = std::stoi(argv[arg_idx++]);
    params.K_               = std::stoi(argv[arg_idx++]);
    params.C_               = std::stoi(argv[arg_idx++]);

    params.filter_spatial_lengths_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.filter_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_spatial_lengths_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_spatial_lengths_[i] = std::stoi(argv[arg_idx++]);
    }
    params.conv_filter_strides_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.conv_filter_strides_[i] = std::stoi(argv[arg_idx++]);
    }
    params.conv_filter_dilations_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.conv_filter_dilations_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_left_pads_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_left_pads_[i] = std::stoi(argv[arg_idx++]);
    }
    params.input_right_pads_.resize(num_dim_spatial);
    for(int i = 0; i < num_dim_spatial; ++i)
    {
        params.input_right_pads_[i] = std::stoi(argv[arg_idx++]);
    }

    return params;
}

} // namespace
int profile_convnd_bwd_weight(int argc, char* argv[], int num_dim_spatial)
{
    const int preParams = 11;
    int conv_args       = 3 + num_dim_spatial * 6;
    int cmdline_nargs   = conv_args + preParams;
    if(cmdline_nargs != argc)
    {
        printf("arg1: tensor operation (convnd[1|2|3]d_bwd_weight: BackwardConvolution)\n");
        printf("arg2: data type (0: fp32; 1: fp16, 2: bf16)\n");
        printf("arg3: input tensor layout (0: NCHW; 1: NHWC)\n");
        printf("arg4: weight tensor layout (0: KCYX; 1: KYXC)\n");
        printf("arg5: output tensor layout (0: NKHW; 1: NHWK)\n");
        printf("arg6: verification (0: no; 1: yes)\n");
        printf("arg7: initialization (0: no init; 1: integer value; 2: decimal value)\n");
        printf("arg8: print tensor value (0: no; 1: yes)\n");
        printf("arg9: time kernel (0=no, 1=yes)\n");
        printf("arg10: splitk\n");
        printf("arg11 to 25: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
               "RightPx\n");
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto in_layout       = static_cast<ConvInputLayout>(std::stoi(argv[3]));
    const auto wei_layout      = static_cast<ConvWeightLayout>(std::stoi(argv[4]));
    const auto out_layout      = static_cast<ConvOutputLayout>(std::stoi(argv[5]));
    const bool do_verification = std::stoi(argv[6]);
    const int init_method      = std::stoi(argv[7]);
    const bool do_log          = std::stoi(argv[8]);
    const bool time_kernel     = std::stoi(argv[9]);

    ck::index_t split_k = std::stoi(argv[10]);
    split_k             = std::max(1, split_k);

    ck::utils::conv::ConvParams params = parse_conv_params(num_dim_spatial, argv, preParams);

    auto Run = [&](auto input_type, auto wei_type, auto out_type) {
        using InDataType  = decltype(input_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        switch(num_dim_spatial)
        {
        case 1:
            ck::profiler::profile_convnd_bwd_weight_impl<1,
                                                         InDataType,
                                                         WeiDataType,
                                                         OutDataType,
                                                         ck::tensor_layout::convolution::NWC,
                                                         ck::tensor_layout::convolution::KXC,
                                                         ck::tensor_layout::convolution::NWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_,
                split_k);
            break;
        case 2:
            ck::profiler::profile_convnd_bwd_weight_impl<2,
                                                         InDataType,
                                                         WeiDataType,
                                                         OutDataType,
                                                         ck::tensor_layout::convolution::NHWC,
                                                         ck::tensor_layout::convolution::KYXC,
                                                         ck::tensor_layout::convolution::NHWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_,
                split_k);
            break;
        case 3:
            ck::profiler::profile_convnd_bwd_weight_impl<3,
                                                         InDataType,
                                                         WeiDataType,
                                                         OutDataType,
                                                         ck::tensor_layout::convolution::NDHWC,
                                                         ck::tensor_layout::convolution::KZYXC,
                                                         ck::tensor_layout::convolution::NDHWK>(
                do_verification,
                init_method,
                do_log,
                time_kernel,
                params.N_,
                params.K_,
                params.C_,
                params.input_spatial_lengths_,
                params.filter_spatial_lengths_,
                params.GetOutputSpatialLengths(),
                params.conv_filter_strides_,
                params.conv_filter_dilations_,
                params.input_left_pads_,
                params.input_right_pads_,
                split_k);
            break;
        default: break;
        }
    };

    if(data_type == ConvDataType::F32_F32_F32 && in_layout == ConvInputLayout::NHWC &&
       wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(float{}, float{}, float{});
    }
    else if(data_type == ConvDataType::F16_F16_F16 && in_layout == ConvInputLayout::NHWC &&
            wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(ck::half_t{}, ck::half_t{}, ck::half_t{});
    }
    else if(data_type == ConvDataType::BF16_BF16_BF16 && in_layout == ConvInputLayout::NHWC &&
            wei_layout == ConvWeightLayout::KYXC && out_layout == ConvOutputLayout::NHWK)
    {
        Run(ck::bhalf_t{}, ck::bhalf_t{}, ck::bhalf_t{});
    }
    else
    {
        std::cout << "wrong! this Conv data_type & layout is not implemented" << std::endl;
        return 1;
    }

    return 0;
}
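A worked instance of the argument count checked above (editor's illustration, not from the commit): with preParams = 11 (arg0 through arg10, including splitk), a 2-D problem needs 11 + 3 + 6 * 2 = 26 command-line entries.

// Illustrative check of the 2-D bwd-weight argument arithmetic.
static_assert(11 + (3 + 6 * 2) == 26, "2-D bwd-weight expects argc == 26");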
profiler/src/profile_convnd_fwd.cpp (deleted, 100644 → 0)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/conv_util.hpp"
#include "ck/library/utility/fill.hpp"

namespace {

enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};

enum struct ConvDataLayout
{
    NCHW, // 0
    NHWC, // 1
};

namespace ctl = ck::tensor_layout::convolution;

template <int NDim, ConvDataLayout DataLayout>
struct ConvolutionLayouts;

template <>
struct ConvolutionLayouts<1, ConvDataLayout::NHWC>
{
    typedef ctl::NWC Input;
    typedef ctl::KXC Weight;
    typedef ctl::NWK Output;
};
template <>
struct ConvolutionLayouts<2, ConvDataLayout::NHWC>
{
    typedef ctl::NHWC Input;
    typedef ctl::KYXC Weight;
    typedef ctl::NHWK Output;
};
template <>
struct ConvolutionLayouts<3, ConvDataLayout::NHWC>
{
    typedef ctl::NDHWC Input;
    typedef ctl::KZYXC Weight;
    typedef ctl::NDHWK Output;
};
template <>
struct ConvolutionLayouts<1, ConvDataLayout::NCHW>
{
    typedef ctl::NCW Input;
    typedef ctl::KCX Weight;
    typedef ctl::NKW Output;
};
template <>
struct ConvolutionLayouts<2, ConvDataLayout::NCHW>
{
    typedef ctl::NCHW Input;
    typedef ctl::KCYX Weight;
    typedef ctl::NKHW Output;
};
template <>
struct ConvolutionLayouts<3, ConvDataLayout::NCHW>
{
    typedef ctl::NCDHW Input;
    typedef ctl::KCZYX Weight;
    typedef ctl::NKDHW Output;
};

void print_use_msg()
{
    std::cout << "arg1: tensor operation (conv_fwd: ForwardConvolution)\n"
              << "arg2: data type (0: fp32; 1: fp16, 2: bf16, 3: int8)\n"
              << "arg3: data layout (0: NCHW; 1: NHWC)\n"
              << "arg4: verification (0=no, 1=yes)\n"
              << "arg5: initialization (0=no init, 1=integer value, 2=decimal value)\n"
              << "arg6: print tensor value (0: no; 1: yes)\n"
              << "arg7: run kernel # of times (>1)\n"
              << "arg8: N spatial dimensions (default 2)\n"
              << "Following arguments (depending on number of spatial dims):\n"
              << " N, K, C,\n"
              << " <filter spatial dimensions>, (ie Y, X for 2D)\n"
              << " <input image spatial dimensions>, (ie Hi, Wi for 2D)\n"
              << " <strides>, (ie Sy, Sx for 2D)\n"
              << " <dilations>, (ie Dy, Dx for 2D)\n"
              << " <left padding>, (ie LeftPy, LeftPx for 2D)\n"
              << " <right padding>, (ie RightPy, RightPx for 2D)\n"
              << std::endl;
}

ck::utils::conv::ConvParams parse_params(int num_dim_spatial, int argc, char* argv[])
{
    // (N, K, C) + num_dim_spatial * 6 (filter, input, strides, dilations, pad left, pad right)
    int conv_args     = 3 + num_dim_spatial * 6;
    int cmdline_nargs = conv_args + 9;
    if(cmdline_nargs != argc)
    {
        print_use_msg();
        exit(1);
    }
    int arg_idx = 9;
    return ck::utils::conv::parse_conv_params(num_dim_spatial, arg_idx, argv);
}

template <int NDim,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename ConvLayouts>
void profile_convnd_instances_impl(const ck::utils::conv::ConvParams& params,
                                   bool do_verification,
                                   bool do_log,
                                   bool time_kernel,
                                   int init_method,
                                   ConvLayouts)
{
    using namespace std::placeholders;
    using namespace ck::utils;

    std::unique_ptr<OpInstance<OutDataType, InDataType, WeiDataType>> conv_instance;
    switch(init_method)
    {
    case 0:
        conv_instance = std::make_unique<conv::ConvFwdOpInstance<InDataType,
                                                                 WeiDataType,
                                                                 OutDataType,
                                                                 typename ConvLayouts::Input,
                                                                 typename ConvLayouts::Weight,
                                                                 typename ConvLayouts::Output>>(
            params, false);
        break;
    case 1:
        conv_instance = std::make_unique<
            conv::ConvFwdOpInstance<InDataType,
                                    WeiDataType,
                                    OutDataType,
                                    typename ConvLayouts::Input,
                                    typename ConvLayouts::Weight,
                                    typename ConvLayouts::Output,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::utils::FillUniformDistributionIntegerValue<int>,
                                    ck::utils::FillUniformDistributionIntegerValue<int>>>(
            params,
            true,
            ck::utils::FillUniformDistributionIntegerValue<int>{},
            ck::utils::FillUniformDistributionIntegerValue<int>{});
        break;
    case 2:
        conv_instance = std::make_unique<
            conv::ConvFwdOpInstance<InDataType,
                                    WeiDataType,
                                    OutDataType,
                                    typename ConvLayouts::Input,
                                    typename ConvLayouts::Weight,
                                    typename ConvLayouts::Output,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::tensor_operation::element_wise::PassThrough,
                                    ck::utils::FillUniformDistribution<InDataType>,
                                    ck::utils::FillUniformDistribution<WeiDataType>>>(
            params,
            true,
            ck::utils::FillUniformDistribution<InDataType>{},
            ck::utils::FillUniformDistribution<WeiDataType>{});
        break;
    default: throw std::runtime_error("Unsupported init method!");
    }

    auto reference_conv_fwd_fun = std::bind(
        conv::run_reference_convolution_forward<NDim, InDataType, WeiDataType, OutDataType>,
        params,
        _1,
        _2,
        _3);

    OpInstanceRunEngine<InDataType, WeiDataType, OutDataType> run_engine(
        *conv_instance, reference_conv_fwd_fun, do_verification);

    auto best_conf = run_engine.Profile(
        conv::ConvolutionFwdInstances<InDataType, WeiDataType, OutDataType>::template Get<NDim>(),
        time_kernel,
        do_verification,
        do_log);

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_conf.best_op_name
              << "\navg_time: " << best_conf.best_avg_time
              << "\ntflops: " << best_conf.best_tflops
              << "\nGB/s: " << best_conf.best_gb_per_sec << std::endl;
}

template <int NDim>
void profile_convnd_instances(ConvDataType data_type,
                              ConvDataLayout data_layout,
                              const ck::utils::conv::ConvParams& params,
                              bool do_verification,
                              bool do_log,
                              bool time_kernel,
                              int init_method)
{
    switch(data_layout)
    {
    case ConvDataLayout::NHWC: {
        switch(data_type)
        {
        case ConvDataType::F32_F32_F32:
            profile_convnd_instances_impl<NDim, float, float, float>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
            break;
        case ConvDataType::F16_F16_F16:
            profile_convnd_instances_impl<NDim, ck::half_t, ck::half_t, ck::half_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
            break;
        case ConvDataType::BF16_BF16_BF16:
            profile_convnd_instances_impl<NDim, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
            break;
        case ConvDataType::INT8_INT8_INT8:
            profile_convnd_instances_impl<NDim, int8_t, int8_t, int8_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NHWC>{});
            break;
        }
        break;
    }
    case ConvDataLayout::NCHW: {
        switch(data_type)
        {
        case ConvDataType::F32_F32_F32:
            profile_convnd_instances_impl<NDim, float, float, float>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
            break;
        case ConvDataType::F16_F16_F16:
            profile_convnd_instances_impl<NDim, ck::half_t, ck::half_t, ck::half_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
            break;
        case ConvDataType::BF16_BF16_BF16:
            profile_convnd_instances_impl<NDim, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
            break;
        case ConvDataType::INT8_INT8_INT8:
            profile_convnd_instances_impl<NDim, int8_t, int8_t, int8_t>(
                params,
                do_verification,
                do_log,
                time_kernel,
                init_method,
                ConvolutionLayouts<NDim, ConvDataLayout::NCHW>{});
            break;
        }
        break;
    }
    }
}

} // namespace
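Editor's sketch (not from the original file; assumes <type_traits> is included): the ConvolutionLayouts trait above maps (spatial rank, data layout) to the three tensor-layout tags consumed by the instance factories.

// Quick illustration of the trait lookup for a 2-D NHWC problem.
using Layouts2D = ConvolutionLayouts<2, ConvDataLayout::NHWC>;
static_assert(std::is_same<Layouts2D::Input, ctl::NHWC>::value, "input tag");
static_assert(std::is_same<Layouts2D::Weight, ctl::KYXC>::value, "weight tag");
static_assert(std::is_same<Layouts2D::Output, ctl::NHWK>::value, "output tag");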
int profile_convnd_fwd(int argc, char* argv[])
{
    using namespace ck::utils::conv;

    ConvDataType data_type{ConvDataType::F32_F32_F32};
    ConvDataLayout data_layout{ConvDataLayout::NHWC};
    bool do_verification{true};
    int init_method{2};
    bool do_log{false};
    bool time_kernel{false};
    int num_dim_spatial{2};
    ConvParams params;

    if(argc >= 4)
    {
        data_type   = static_cast<ConvDataType>(std::stoi(argv[2]));
        data_layout = static_cast<ConvDataLayout>(std::stoi(argv[3]));
    }
    if(argc >= 9)
    {
        do_verification = std::stoi(argv[4]);
        init_method     = std::stoi(argv[5]);
        do_log          = std::stoi(argv[6]);
        time_kernel     = std::stoi(argv[7]);
        num_dim_spatial = std::stoi(argv[8]);
    }
    if(argc >= 10)
    {
        params = parse_params(num_dim_spatial, argc, argv);
    }

    // TODO Print nice message what is being profiled.
    switch(num_dim_spatial)
    {
    case 1:
        profile_convnd_instances<1>(
            data_type, data_layout, params, do_verification, do_log, time_kernel, init_method);
        break;
    case 2:
        profile_convnd_instances<2>(
            data_type, data_layout, params, do_verification, do_log, time_kernel, init_method);
        break;
    case 3:
        profile_convnd_instances<3>(
            data_type, data_layout, params, do_verification, do_log, time_kernel, init_method);
        break;
    default:
        throw std::runtime_error("profile_conv_fwd: unsupported num_dim_spatial value: " +
                                 std::to_string(num_dim_spatial));
    }

    return 0;
}
profiler/src/profile_gemm.cpp
@@ -24,21 +24,27 @@ enum struct GemmDataType
     INT8_INT8_INT8, // 3
 };
 
+static void print_helper_msg()
+{
+    std::cout << "arg1: tensor operation (gemm: GEMM)\n"
+              << "arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n"
+              << "arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n"
+              << "                     1: A[m, k] * B[n, k] = C[m, n];\n"
+              << "                     2: A[k, m] * B[k, n] = C[m, n];\n"
+              << "                     3: A[k, m] * B[n, k] = C[m, n])\n"
+              << "arg4: verification (0: no; 1: yes)\n"
+              << "arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n"
+              << "arg6: print tensor value (0: no; 1: yes)\n"
+              << "arg7: time kernel (0: no, 1: yes)\n"
+              << "arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n"
+              << std::endl;
+}
+
 int profile_gemm(int argc, char* argv[])
 {
     if(argc != 14)
     {
-        printf("arg1: tensor operation (gemm: GEMM)\n");
-        printf("arg2: data type (0: fp32; 1: fp16; 2: bf16; 3: int8)\n");
-        printf("arg3: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
-        printf("                     1: A[m, k] * B[n, k] = C[m, n];\n");
-        printf("                     2: A[k, m] * B[k, n] = C[m, n];\n");
-        printf("                     3: A[k, m] * B[n, k] = C[m, n])\n");
-        printf("arg4: verification (0: no; 1: yes)\n");
-        printf("arg5: initialization (0: no init; 1: integer value; 2: decimal value)\n");
-        printf("arg6: print tensor value (0: no; 1: yes)\n");
-        printf("arg7: time kernel (0=no, 1=yes)\n");
-        printf("arg8 to 13: M, N, K, StrideA, StrideB, StrideC\n");
+        print_helper_msg();
         exit(1);
     }

@@ -109,67 +115,67 @@ int profile_gemm(int argc, char* argv[])
     if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_KN_MN)
     {
-        return profile(F32{}, F32{}, F32{}, F32{}, Row{}, Row{}, Row{});
+        return profile(Row{}, Row{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
     else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::MK_NK_MN)
     {
-        return profile(F32{}, F32{}, F32{}, F32{}, Row{}, Col{}, Row{});
+        return profile(Row{}, Col{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
     else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_KN_MN)
     {
-        return profile(F32{}, F32{}, F32{}, F32{}, Col{}, Row{}, Row{});
+        return profile(Col{}, Row{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
     else if(data_type == GemmDataType::F32_F32_F32 && layout == GemmMatrixLayout::KM_NK_MN)
     {
-        return profile(F32{}, F32{}, F32{}, F32{}, Col{}, Col{}, Row{});
+        return profile(Col{}, Col{}, Row{}, F32{}, F32{}, F32{}, F32{});
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, Row{}, Row{}, Row{});
+        return profile(Row{}, Row{}, Row{}, F16{}, F16{}, F32{}, F16{});
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::MK_NK_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, Row{}, Col{}, Row{});
+        return profile(Row{}, Col{}, Row{}, F16{}, F16{}, F32{}, F16{});
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_KN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, Col{}, Row{}, Row{});
+        return profile(Col{}, Row{}, Row{}, F16{}, F16{}, F32{}, F16{});
     }
     else if(data_type == GemmDataType::F16_F16_F16 && layout == GemmMatrixLayout::KM_NK_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, Col{}, Col{}, Row{});
+        return profile(Col{}, Col{}, Row{}, F16{}, F16{}, F32{}, F16{});
     }
     else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::MK_KN_MN)
     {
-        return profile(BF16{}, BF16{}, F32{}, BF16{}, Row{}, Row{}, Row{});
+        return profile(Row{}, Row{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
     }
     else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::MK_NK_MN)
     {
-        return profile(BF16{}, BF16{}, F32{}, BF16{}, Row{}, Col{}, Row{});
+        return profile(Row{}, Col{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
     }
     else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::KM_KN_MN)
     {
-        return profile(BF16{}, BF16{}, F32{}, BF16{}, Col{}, Row{}, Row{});
+        return profile(Col{}, Row{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
     }
     else if(data_type == GemmDataType::BF16_BF16_BF16 && layout == GemmMatrixLayout::KM_NK_MN)
     {
-        return profile(BF16{}, BF16{}, F32{}, BF16{}, Col{}, Col{}, Row{});
+        return profile(Col{}, Col{}, Row{}, BF16{}, BF16{}, F32{}, BF16{});
     }
     else if(data_type == GemmDataType::INT8_INT8_INT8 && layout == GemmMatrixLayout::MK_KN_MN)
     {
-        return profile(INT8{}, INT8{}, INT32{}, INT8{}, Row{}, Row{}, Row{});
+        return profile(Row{}, Row{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
     }
     else if(data_type == GemmDataType::INT8_INT8_INT8 && layout == GemmMatrixLayout::MK_NK_MN)
     {
-        return profile(INT8{}, INT8{}, INT32{}, INT8{}, Row{}, Col{}, Row{});
+        return profile(Row{}, Col{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
     }
     else if(data_type == GemmDataType::INT8_INT8_INT8 && layout == GemmMatrixLayout::KM_KN_MN)
     {
-        return profile(INT8{}, INT8{}, INT32{}, INT8{}, Col{}, Row{}, Row{});
+        return profile(Col{}, Row{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
    }
     else if(data_type == GemmDataType::INT8_INT8_INT8 && layout == GemmMatrixLayout::KM_NK_MN)
     {
-        return profile(INT8{}, INT8{}, INT32{}, INT8{}, Col{}, Col{}, Row{});
+        return profile(Col{}, Col{}, Row{}, INT8{}, INT8{}, INT32{}, INT8{});
     }
     else
     {
...
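The hunk above reorders profile's parameters so the layout tags come first; both layouts and data types still travel as empty tag values that a generic lambda recovers with decltype. A self-contained sketch of that tag-dispatch pattern (editor's illustration, not the profiler's actual lambda):

#include <type_traits>

struct Row {};
struct Col {};

int main()
{
    auto profile = [](auto a_layout, auto b_layout, auto /*c_layout*/) {
        using ALayout = decltype(a_layout);
        using BLayout = decltype(b_layout);
        // Dispatch on the recovered types, mirroring the branches above.
        return (std::is_same<ALayout, Row>::value &&
                std::is_same<BLayout, Col>::value)
                   ? 0
                   : 1;
    };
    return profile(Row{}, Col{}, Row{}); // the MK_NK_MN path returns 0
}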
profiler/src/profile_gemm_add_add_fastgelu.cpp
@@ -75,7 +75,9 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
                        auto e_type,
                        auto a_layout,
                        auto b_layout,
-                       auto de_layout) {
+                       auto d0_layout,
+                       auto d1_layout,
+                       auto e_layout) {
         using ADataType   = decltype(a_type);
         using BDataType   = decltype(b_type);
         using AccDataType = decltype(acc_type);

@@ -85,13 +87,15 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
         using ALayout  = decltype(a_layout);
         using BLayout  = decltype(b_layout);
-        using DELayout = decltype(de_layout);
+        using D0Layout = decltype(d0_layout);
+        using D1Layout = decltype(d1_layout);
+        using ELayout  = decltype(e_layout);
 
         const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
         const int DefaultStrideB  = ck::is_same_v<BLayout, Row> ? N : K;
-        const int DefaultStrideD0 = ck::is_same_v<DELayout, Row> ? N : M;
-        const int DefaultStrideD1 = ck::is_same_v<DELayout, Row> ? N : M;
-        const int DefaultStrideE  = ck::is_same_v<DELayout, Row> ? N : M;
+        const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
+        const int DefaultStrideD1 = ck::is_same_v<D1Layout, Row> ? N : M;
+        const int DefaultStrideE  = ck::is_same_v<ELayout, Row> ? N : M;
 
         bool pass = ck::profiler::profile_gemm_add_add_fastgelu_impl<ADataType,
                                                                      BDataType,

@@ -101,7 +105,9 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
                                                                      EDataType,
                                                                      ALayout,
                                                                      BLayout,
-                                                                     DELayout>(
+                                                                     D0Layout,
+                                                                     D1Layout,
+                                                                     ELayout>(
             do_verification,
             init_method,
             do_log,

@@ -120,22 +126,22 @@ int profile_gemm_add_add_fastgelu(int argc, char* argv[])
     if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
        layout == MatrixLayout::MK_KN_MN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Row{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
             layout == MatrixLayout::MK_NK_MN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Col{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Row{}, Col{}, Row{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
             layout == MatrixLayout::KM_KN_MN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Row{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Row{}, Row{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16_F16 &&
             layout == MatrixLayout::KM_NK_MN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Col{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, F16{}, Col{}, Col{}, Row{}, Row{}, Row{});
     }
     else
     {
...
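The default strides above follow the usual dense-storage rule. A standalone sketch of that rule (editor's assumption, not a helper from the diff): a row-major M x N matrix strides by N between consecutive rows, while a column-major one strides by M between consecutive columns.

#include <type_traits>

struct Row {};
struct Col {};

// Leading-dimension stride for a dense M x N matrix in the given layout.
template <typename Layout>
constexpr int default_stride(int M, int N)
{
    return std::is_same<Layout, Row>::value ? N : M;
}

static_assert(default_stride<Row>(3, 5) == 5, "row-major leading dim is N");
static_assert(default_stride<Col>(3, 5) == 3, "col-major leading dim is M");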
profiler/src/profile_gemm_bilinear.cpp
@@ -77,21 +77,23 @@ int profile_gemm_bilinear(int argc, char* argv[])
                        auto e_type,
                        auto a_layout,
                        auto b_layout,
-                       auto de_layout) {
+                       auto d_layout,
+                       auto e_layout) {
         using ADataType   = decltype(a_type);
         using BDataType   = decltype(b_type);
         using AccDataType = decltype(acc_type);
         using DDataType   = decltype(d_type);
         using EDataType   = decltype(e_type);
 
-        using ALayout  = decltype(a_layout);
-        using BLayout  = decltype(b_layout);
-        using DELayout = decltype(de_layout);
+        using ALayout = decltype(a_layout);
+        using BLayout = decltype(b_layout);
+        using DLayout = decltype(d_layout);
+        using ELayout = decltype(e_layout);
 
         const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
         const int DefaultStrideB = ck::is_same_v<BLayout, Row> ? N : K;
-        const int DefaultStrideD = ck::is_same_v<DELayout, Row> ? N : M;
-        const int DefaultStrideE = ck::is_same_v<DELayout, Row> ? N : M;
+        const int DefaultStrideD = ck::is_same_v<DLayout, Row> ? N : M;
+        const int DefaultStrideE = ck::is_same_v<ELayout, Row> ? N : M;
 
         bool pass = ck::profiler::profile_gemm_bilinear_impl<ADataType,
                                                              BDataType,

@@ -100,7 +102,8 @@ int profile_gemm_bilinear(int argc, char* argv[])
                                                              EDataType,
                                                              ALayout,
                                                              BLayout,
-                                                             DELayout>(
+                                                             DLayout,
+                                                             ELayout>(
             do_verification,
             init_method,
             do_log,

@@ -120,19 +123,19 @@ int profile_gemm_bilinear(int argc, char* argv[])
     if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::MK_KN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Row{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::MK_NK_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Col{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Row{}, Col{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::KM_KN_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Row{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Row{}, Row{}, Row{});
     }
     else if(data_type == MatrixDataType::F16_F16_F16_F16 && layout == MatrixLayout::KM_NK_MN_MN)
     {
-        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Col{}, Row{});
+        return profile(F16{}, F16{}, F32{}, F16{}, F16{}, Col{}, Col{}, Row{}, Row{});
     }
     else
     {
...
profiler/src/profile_grouped_conv_fwd.cpp (new file, 0 → 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "profiler/include/profile_grouped_conv_fwd_impl.hpp"

namespace {

enum struct ConvLayout
{
    GNHWC_GKYXC_GNHWK, // 0
    NHWGC_KYXGC_NHWGK, // 1
};

enum struct ConvDataType
{
    F32_F32_F32,    // 0
    F16_F16_F16,    // 1
    BF16_BF16_BF16, // 2
    INT8_INT8_INT8, // 3
};

static void print_helper_msg()
{
    std::cout
        // clang-format off
        << "arg1: tensor operation (grouped_conv_fwd: Grouped Convolution Forward)\n"
        << "arg2: data type (0: Input fp32, Weight fp32, Output fp32\n"
        << "                 1: Input fp16, Weight fp16, Output fp16\n"
        << "                 2: Input bf16, Weight bf16, Output bf16\n"
        << "                 3: Input int8, Weight int8, Output int8)\n"
        << "arg3: tensor layout (0: Input[G, N, Hi, Wi, C], Weight[G, K, Y, X, C], Output[G, N, Ho, Wo, K]\n"
        << "                     1: Input[N, Hi, Wi, G, C], Weight[K, Y, X, G, C], Output[N, Ho, Wo, G, K])\n"
        << "arg4: verification (0: no, 1: yes)\n"
        << "arg5: initialization (0: no init, 1: integer value, 2: decimal value)\n"
        << "arg6: print tensor value (0: no; 1: yes)\n"
        << "arg7: time kernel (0: no, 1: yes)\n"
        << ck::utils::conv::get_conv_param_parser_helper_msg()
        << std::endl;
    // clang-format on
}

} // namespace

int profile_grouped_conv_fwd(int argc, char* argv[])
{
    // 8 for control, 1 for num_dim_spatial
    if(argc < 9)
    {
        print_helper_msg();
        return 1;
    }

    const auto data_type       = static_cast<ConvDataType>(std::stoi(argv[2]));
    const auto layout          = static_cast<ConvLayout>(std::stoi(argv[3]));
    const bool do_verification = std::stoi(argv[4]);
    const int init_method      = std::stoi(argv[5]);
    const bool do_log          = std::stoi(argv[6]);
    const bool time_kernel     = std::stoi(argv[7]);
    const int num_dim_spatial  = std::stoi(argv[8]);

    // 8 for control, 1 for num_dim_spatial, 4 for G/N/K/C, and 6 * num_dim_spatial
    if(argc != 8 + 1 + 4 + 6 * num_dim_spatial)
    {
        print_helper_msg();
        return 1;
    }

    const auto params = ck::utils::conv::parse_conv_param(num_dim_spatial, 9, argv);

    using F32  = float;
    using F16  = ck::half_t;
    using BF16 = ck::bhalf_t;
    using INT8 = int8_t;

    //
    using GNWC   = ck::tensor_layout::convolution::GNWC;
    using GNHWC  = ck::tensor_layout::convolution::GNHWC;
    using GNDHWC = ck::tensor_layout::convolution::GNDHWC;

    using GKXC   = ck::tensor_layout::convolution::GKXC;
    using GKYXC  = ck::tensor_layout::convolution::GKYXC;
    using GKZYXC = ck::tensor_layout::convolution::GKZYXC;

    using GNWK   = ck::tensor_layout::convolution::GNWK;
    using GNHWK  = ck::tensor_layout::convolution::GNHWK;
    using GNDHWK = ck::tensor_layout::convolution::GNDHWK;

    //
    using NWGC   = ck::tensor_layout::convolution::NWGC;
    using NHWGC  = ck::tensor_layout::convolution::NHWGC;
    using NDHWGC = ck::tensor_layout::convolution::NDHWGC;

    using KXGC   = ck::tensor_layout::convolution::KXGC;
    using KYXGC  = ck::tensor_layout::convolution::KYXGC;
    using KZYXGC = ck::tensor_layout::convolution::KZYXGC;

    using NWGK   = ck::tensor_layout::convolution::NWGK;
    using NHWGK  = ck::tensor_layout::convolution::NHWGK;
    using NDHWGK = ck::tensor_layout::convolution::NDHWGK;

    constexpr auto I1 = ck::Number<1>{};
    constexpr auto I2 = ck::Number<2>{};
    constexpr auto I3 = ck::Number<3>{};

    auto profile = [&](auto num_dim_spatial_tmp,
                       auto in_layout,
                       auto wei_layout,
                       auto out_layout,
                       auto in_type,
                       auto wei_type,
                       auto out_type) {
        constexpr ck::index_t NDimSpatial = num_dim_spatial_tmp.value;

        using InLayout  = decltype(in_layout);
        using WeiLayout = decltype(wei_layout);
        using OutLayout = decltype(out_layout);

        using InDataType  = decltype(in_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);

        bool pass = ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                                InLayout,
                                                                WeiLayout,
                                                                OutLayout,
                                                                InDataType,
                                                                WeiDataType,
                                                                OutDataType>(
            do_verification, init_method, do_log, time_kernel, params);

        return pass ? 0 : 1;
    };

    // GNHWC_GKYXC_GNHWK
    if(num_dim_spatial == 1 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I1, GNWC{}, GKXC{}, GNWK{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I2, GNHWC{}, GKYXC{}, GNHWK{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::GNHWC_GKYXC_GNHWK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I3, GNDHWC{}, GKZYXC{}, GNDHWK{}, INT8{}, INT8{}, INT8{});
        }
    }
    // NHWGC_KYXGC_NHWGK
    else if(num_dim_spatial == 1 && layout == ConvLayout::NHWGC_KYXGC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I1, NWGC{}, KXGC{}, NWGK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I1, NWGC{}, KXGC{}, NWGK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I1, NWGC{}, KXGC{}, NWGK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I1, NWGC{}, KXGC{}, NWGK{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 2 && layout == ConvLayout::NHWGC_KYXGC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I2, NHWGC{}, KYXGC{}, NHWGK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I2, NHWGC{}, KYXGC{}, NHWGK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I2, NHWGC{}, KYXGC{}, NHWGK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I2, NHWGC{}, KYXGC{}, NHWGK{}, INT8{}, INT8{}, INT8{});
        }
    }
    else if(num_dim_spatial == 3 && layout == ConvLayout::NHWGC_KYXGC_NHWGK)
    {
        if(data_type == ConvDataType::F32_F32_F32)
        {
            return profile(I3, NDHWGC{}, KZYXGC{}, NDHWGK{}, F32{}, F32{}, F32{});
        }
        else if(data_type == ConvDataType::F16_F16_F16)
        {
            return profile(I3, NDHWGC{}, KZYXGC{}, NDHWGK{}, F16{}, F16{}, F16{});
        }
        else if(data_type == ConvDataType::BF16_BF16_BF16)
        {
            return profile(I3, NDHWGC{}, KZYXGC{}, NDHWGK{}, BF16{}, BF16{}, BF16{});
        }
        else if(data_type == ConvDataType::INT8_INT8_INT8)
        {
            return profile(I3, NDHWGC{}, KZYXGC{}, NDHWGK{}, INT8{}, INT8{}, INT8{});
        }
    }

    std::cout << "this data_type & layout is not implemented" << std::endl;

    return 1;
}
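A worked instance of the argument-count guard in profile_grouped_conv_fwd above (editor's illustration, following the file's own comment): a 2-D grouped convolution needs 8 control arguments + 1 (num_dim_spatial) + 4 (G, N, K, C) + 6 * 2 spatial descriptors, i.e. 25 entries in argv.

// Illustrative check of the 2-D grouped-conv argument arithmetic.
static_assert(8 + 1 + 4 + 6 * 2 == 25, "2-D grouped conv expects argc == 25");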
profiler/src/profile_grouped_gemm.cpp
@@ -83,7 +83,7 @@ int profile_grouped_gemm(int argc, char* argv[])
         ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                 ck::half_t,
                                                 ck::half_t,
-                                                ck::half_t,
+                                                float,
                                                 ck::tensor_layout::gemm::RowMajor,
                                                 ck::tensor_layout::gemm::RowMajor,
                                                 ck::tensor_layout::gemm::RowMajor>(do_verification,
...
@@ -102,7 +102,7 @@ int profile_grouped_gemm(int argc, char* argv[])
         ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                 ck::half_t,
                                                 ck::half_t,
-                                                ck::half_t,
+                                                float,
                                                 ck::tensor_layout::gemm::RowMajor,
                                                 ck::tensor_layout::gemm::ColumnMajor,
                                                 ck::tensor_layout::gemm::RowMajor>(do_verification,
...
@@ -121,7 +121,7 @@ int profile_grouped_gemm(int argc, char* argv[])
         ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                 ck::half_t,
                                                 ck::half_t,
-                                                ck::half_t,
+                                                float,
                                                 ck::tensor_layout::gemm::ColumnMajor,
                                                 ck::tensor_layout::gemm::RowMajor,
                                                 ck::tensor_layout::gemm::RowMajor>(do_verification,
...
@@ -140,7 +140,7 @@ int profile_grouped_gemm(int argc, char* argv[])
         ck::profiler::profile_grouped_gemm_impl<ck::half_t,
                                                 ck::half_t,
                                                 ck::half_t,
-                                                ck::half_t,
+                                                float,
                                                 ck::tensor_layout::gemm::ColumnMajor,
                                                 ck::tensor_layout::gemm::ColumnMajor,
                                                 ck::tensor_layout::gemm::RowMajor>(do_verification,
...
profiler/src/profile_reduce.cpp
@@ -11,7 +11,7 @@
 #include "ck/utility/reduction_enums.hpp"
 
-#include "ck/library/host_tensor/host_common_util.hpp"
+#include "ck/library/utility/host_common_util.hpp"
 #include "profiler/include/profile_reduce_impl.hpp"
 #include "profiler/include/data_type_enum.hpp"
...
profiler/src/profiler.cpp
@@ -15,12 +15,11 @@ int profile_grouped_gemm(int, char*[]);
 int profile_conv_fwd(int, char*[]);
 int profile_conv_fwd_bias_relu(int, char*[]);
 int profile_conv_fwd_bias_relu_add(int, char*[]);
-int profile_convnd_fwd(int argc, char* argv[]);
-int profile_convnd_bwd_data(int, char*[], int);
+int profile_conv_bwd_data(int, char*[]);
 int profile_conv_bwd_weight(int, char*[]);
+int profile_grouped_conv_fwd(int, char*[]);
 int profile_normalization(int, char*[]);
 int profile_reduce(int, char*[]);
-int profile_convnd_bwd_weight(int, char*[], int);
 
 static void print_helper_message()
 {
...
@@ -34,13 +33,12 @@ static void print_helper_message()
            "    batched_gemm: Batched GEMM\n"
            "    batched_gemm_reduce: Batched GEMM+Reduce\n"
            "    grouped_gemm: Grouped GEMM\n"
-           "    conv_fwd: ForwardConvolution\n"
+           "    conv_fwd: Convolution Forward\n"
            "    conv_fwd_bias_relu: ForwardConvolution+Bias+ReLU\n"
            "    conv_fwd_bias_relu_add: ForwardConvolution+Bias+ReLU+Add\n"
-           "    conv1d_bwd_data: BackwardConvolution data 1 dim\n"
-           "    conv2d_bwd_data: BackwardConvolution data 2 dim\n"
-           "    conv3d_bwd_data: BackwardConvolution data 3 dim\n"
-           "    conv2d_bwd_weight: Backward Weight Convolution 2d\n"
+           "    conv_bwd_data: Convolution Backward Data\n"
+           "    conv_bwd_weight: Convolution Backward Weight\n"
+           "    grouped_conv_fwd: Grouped Convolution Forward\n"
            "    reduce: Reduce\n");
     // clang-format on
 }
...
@@ -53,8 +51,7 @@ int main(int argc, char* argv[])
         return 0;
     }
-    if(strcmp(argv[1], "gemm") == 0)
+    else if(strcmp(argv[1], "gemm") == 0)
     {
         return profile_gemm(argc, argv);
     }
...
@@ -92,7 +89,7 @@ int main(int argc, char* argv[])
     }
     else if(strcmp(argv[1], "conv_fwd") == 0)
     {
-        return profile_convnd_fwd(argc, argv);
+        return profile_conv_fwd(argc, argv);
     }
     else if(strcmp(argv[1], "conv_fwd_bias_relu") == 0)
     {
...
@@ -102,33 +99,17 @@ int main(int argc, char* argv[])
     {
         return profile_conv_fwd_bias_relu_add(argc, argv);
     }
-    else if(strcmp(argv[1], "conv1d_bwd_data") == 0)
-    {
-        return profile_convnd_bwd_data(argc, argv, 1);
-    }
-    else if(strcmp(argv[1], "conv2d_bwd_data") == 0)
-    {
-        return profile_convnd_bwd_data(argc, argv, 2);
-    }
-    else if(strcmp(argv[1], "conv3d_bwd_data") == 0)
+    else if(strcmp(argv[1], "conv_bwd_data") == 0)
     {
-        return profile_convnd_bwd_data(argc, argv, 3);
+        return profile_conv_bwd_data(argc, argv);
     }
-    else if(strcmp(argv[1], "conv2d_bwd_weight") == 0)
+    else if(strcmp(argv[1], "conv_bwd_weight") == 0)
     {
         return profile_conv_bwd_weight(argc, argv);
     }
-    else if(strcmp(argv[1], "convnd1d_bwd_weight") == 0)
-    {
-        return profile_convnd_bwd_weight(argc, argv, 1);
-    }
-    else if(strcmp(argv[1], "convnd2d_bwd_weight") == 0)
-    {
-        return profile_convnd_bwd_weight(argc, argv, 2);
-    }
-    else if(strcmp(argv[1], "convnd3d_bwd_weight") == 0)
+    else if(strcmp(argv[1], "grouped_conv_fwd") == 0)
    {
-        return profile_convnd_bwd_weight(argc, argv, 3);
+        return profile_grouped_conv_fwd(argc, argv);
     }
     else if(strcmp(argv[1], "reduce") == 0)
     {
...
test/CMakeLists.txt
@@ -41,11 +41,11 @@ add_subdirectory(gemm_reduce)
 add_subdirectory(batched_gemm)
 add_subdirectory(batched_gemm_reduce)
 add_subdirectory(grouped_gemm)
-add_subdirectory(convnd_fwd)
 add_subdirectory(reduce)
-add_subdirectory(conv2d_bwd_weight)
+add_subdirectory(convnd_fwd)
 add_subdirectory(convnd_bwd_weight)
 add_subdirectory(convnd_bwd_data)
+add_subdirectory(grouped_convnd_fwd)
 add_subdirectory(block_to_ctile_map)
 add_subdirectory(softmax)
 add_subdirectory(layernorm)
test/batched_gemm/CMakeLists.txt
 add_test_executable(test_batched_gemm_fp16 batched_gemm_fp16.cpp)
-target_link_libraries(test_batched_gemm_fp16 PRIVATE host_tensor)
+target_link_libraries(test_batched_gemm_fp16 PRIVATE utility)
 target_link_libraries(test_batched_gemm_fp16 PRIVATE device_batched_gemm_instance)
test/batched_gemm_reduce/CMakeLists.txt
 add_test_executable(test_batched_gemm_reduce_fp16 batched_gemm_reduce_fp16.cpp)
-target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE host_tensor)
+target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE utility)
 target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE device_batched_gemm_reduce_instance)
test/conv2d_bwd_data/CMakeLists.txt (deleted, 100644 → 0)
add_test_executable(test_conv2d_bwd_data conv2d_bwd_data.cpp)
target_link_libraries(test_conv2d_bwd_data PRIVATE host_tensor)
target_link_libraries(test_conv2d_bwd_data PRIVATE device_conv2d_bwd_data_instance)
test/conv2d_bwd_data/conv2d_bwd_data.cpp (deleted, 100644 → 0)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "config.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_conv.hpp"
#include "tensor_layout.hpp"
#include "device_tensor.hpp"
#include "device_conv_bwd_data.hpp"
#include "element_wise_operation.hpp"
#include "reference_conv_bwd_data.hpp"

using F16  = ck::half_t;
using F32  = float;
using BF16 = ck::bhalf_t;
using INT8 = int8_t;

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using DeviceConvBwdDataNoOpPtr =
    DeviceConvBwdDataPtr<ck::tensor_operation::element_wise::PassThrough,
                         ck::tensor_operation::element_wise::PassThrough,
                         ck::tensor_operation::element_wise::PassThrough>;

void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f32_instances(
    std::vector<DeviceConvBwdDataNoOpPtr>&);
void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f16_instances(
    std::vector<DeviceConvBwdDataNoOpPtr>&);
void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_bf16_instances(
    std::vector<DeviceConvBwdDataNoOpPtr>&);
void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_int8_instances(
    std::vector<DeviceConvBwdDataNoOpPtr>&);

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

template <typename T>
static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
{
    float max_diff = 1e-6;

    for(int i = 0; i < ref.mData.size(); ++i)
    {
        float diff = std::abs(double(ref.mData[i]) - double(result.mData[i]));
        if(max_diff < diff)
        {
            return false;
        }
    }
    return true;
}

int main(int argc, char* argv[])
{
    int data_type   = 0;
    int init_method = 0;

    // Conv shape
    ck::index_t N               = 128;
    ck::index_t K               = 256;
    ck::index_t C               = 192;
    ck::index_t Y               = 3;
    ck::index_t X               = 3;
    ck::index_t Hi              = 71;
    ck::index_t Wi              = 71;
    ck::index_t conv_stride_h   = 2;
    ck::index_t conv_stride_w   = 2;
    ck::index_t conv_dilation_h = 1;
    ck::index_t conv_dilation_w = 1;
    ck::index_t in_left_pad_h   = 1;
    ck::index_t in_left_pad_w   = 1;
    ck::index_t in_right_pad_h  = 1;
    ck::index_t in_right_pad_w  = 1;

    if(argc == 1)
    {
        data_type   = 1;
        init_method = 1;
    }
    else if(argc == 3)
    {
        data_type   = std::stoi(argv[1]);
        init_method = std::stoi(argv[2]);
    }
    else if(argc == 18)
    {
        data_type       = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        N               = std::stoi(argv[3]);
        K               = std::stoi(argv[4]);
        C               = std::stoi(argv[5]);
        Y               = std::stoi(argv[6]);
        X               = std::stoi(argv[7]);
        Hi              = std::stoi(argv[8]);
        Wi              = std::stoi(argv[9]);
        conv_stride_h   = std::stoi(argv[10]);
        conv_stride_w   = std::stoi(argv[11]);
        conv_dilation_h = std::stoi(argv[12]);
        conv_dilation_w = std::stoi(argv[13]);
        in_left_pad_h   = std::stoi(argv[14]);
        in_left_pad_w   = std::stoi(argv[15]);
        in_right_pad_h  = std::stoi(argv[16]);
        in_right_pad_w  = std::stoi(argv[17]);
    }
    else
    {
        printf("arg1: data type (0=fp32, 1=fp16, 2= bfp16, 3= int8_t )\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3 to 17: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
               "RightPx\n");
        exit(1);
    }
    auto Run = [&](auto input_type, auto wei_type, auto out_type, auto acc_type) {
        using InDataType  = decltype(input_type);
        using WeiDataType = decltype(wei_type);
        using OutDataType = decltype(out_type);
        using AccDataType = decltype(acc_type);

        using ReferenceConvBwdInstance =
            ck::tensor_operation::host::ReferenceConvBwdData<InDataType,
                                                             WeiDataType,
                                                             OutDataType,
                                                             AccDataType,
                                                             InElementOp,
                                                             WeiElementOp,
                                                             OutElementOp>;

        const ck::index_t YEff = (Y - 1) * conv_dilation_h + 1;
        const ck::index_t XEff = (X - 1) * conv_dilation_w + 1;

        const ck::index_t Ho = (Hi + in_left_pad_h + in_right_pad_h - YEff) / conv_stride_h + 1;
        const ck::index_t Wo = (Wi + in_left_pad_w + in_right_pad_w - XEff) / conv_stride_w + 1;
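        // Illustrative arithmetic (editor's note, not part of the original
        // file): with this test's defaults (Hi = Wi = 71, 3x3 filter,
        // stride 2, dilation 1, pads 1/1):
        //   YEff = (3 - 1) * 1 + 1 = 3
        //   Ho   = (71 + 1 + 1 - 3) / 2 + 1 = 36, and likewise Wo = 36.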
const
std
::
vector
<
ck
::
index_t
>
input_spatial_lengths
{{
Hi
,
Wi
}};
const
std
::
vector
<
ck
::
index_t
>
filter_spatial_lengths
{{
Y
,
X
}};
const
std
::
vector
<
ck
::
index_t
>
output_spatial_lengths
{{
Ho
,
Wo
}};
const
std
::
vector
<
ck
::
index_t
>
conv_filter_strides
{{
conv_stride_h
,
conv_stride_w
}};
const
std
::
vector
<
ck
::
index_t
>
conv_filter_dilations
{{
conv_dilation_h
,
conv_dilation_w
}};
const
std
::
vector
<
ck
::
index_t
>
input_left_pads
{{
in_left_pad_h
,
in_left_pad_w
}};
const
std
::
vector
<
ck
::
index_t
>
input_right_pads
{{
in_right_pad_h
,
in_right_pad_w
}};
auto
f_host_tensor_descriptor
=
[](
std
::
size_t
N_
,
std
::
size_t
C_
,
std
::
size_t
H
,
std
::
size_t
W
)
{
return
HostTensorDescriptor
(
std
::
vector
<
std
::
size_t
>
({
N_
,
C_
,
H
,
W
}),
std
::
vector
<
std
::
size_t
>
({
C_
*
H
*
W
,
1
,
W
*
C_
,
C_
}));
};
        Tensor<OutDataType> out_n_k_ho_wo(f_host_tensor_descriptor(N, K, Ho, Wo));
        Tensor<WeiDataType> wei_k_c_y_x(f_host_tensor_descriptor(K, C, Y, X));
        Tensor<InDataType> in_n_c_hi_wi_host_result(f_host_tensor_descriptor(N, C, Hi, Wi));
        Tensor<InDataType> in_n_c_hi_wi_device_result(f_host_tensor_descriptor(N, C, Hi, Wi));

        std::cout << "in_n_c_hi_wi: " << in_n_c_hi_wi_host_result.mDesc << std::endl;
        std::cout << "wei_k_c_y_x: " << wei_k_c_y_x.mDesc << std::endl;
        std::cout << "out_n_k_ho_wo: " << out_n_k_ho_wo.mDesc << std::endl;

        switch(init_method)
        {
        case 0: break;
        case 1:
            out_n_k_ho_wo.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
            wei_k_c_y_x.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
            break;
        default:
            out_n_k_ho_wo.GenerateTensorValue(GeneratorTensor_1<OutDataType>{1});
            wei_k_c_y_x.GenerateTensorValue(GeneratorTensor_1<WeiDataType>{1});
        }

        DeviceMem in_device_buf(sizeof(InDataType) *
                                in_n_c_hi_wi_device_result.mDesc.GetElementSpace());
        DeviceMem wei_device_buf(sizeof(WeiDataType) * wei_k_c_y_x.mDesc.GetElementSpace());
        DeviceMem out_device_buf(sizeof(OutDataType) * out_n_k_ho_wo.mDesc.GetElementSpace());

        out_device_buf.ToDevice(out_n_k_ho_wo.mData.data());
        wei_device_buf.ToDevice(wei_k_c_y_x.mData.data());

        // reset input to zero
        in_n_c_hi_wi_device_result.GenerateTensorValue(GeneratorTensor_1<InDataType>{0});
        in_device_buf.ToDevice(in_n_c_hi_wi_device_result.mData.data());

        // get host result
        {
            auto ref_conv    = ReferenceConvBwdInstance{};
            auto ref_invoker = ref_conv.MakeInvoker();

            auto ref_argument = ref_conv.MakeArgument(in_n_c_hi_wi_host_result,
                                                      wei_k_c_y_x,
                                                      out_n_k_ho_wo,
                                                      conv_filter_strides,
                                                      conv_filter_dilations,
                                                      input_left_pads,
                                                      input_right_pads,
                                                      InElementOp{},
                                                      WeiElementOp{},
                                                      OutElementOp{});

            ref_invoker.Run(ref_argument);
        }

        using PassThrough = ck::tensor_operation::element_wise::PassThrough;
        using DeviceConvBwdDataNoOpPtr =
            ck::tensor_operation::device::DeviceConvBwdDataPtr<PassThrough,
                                                               PassThrough,
                                                               PassThrough>;

        // add device Conv instances
        std::vector<DeviceConvBwdDataNoOpPtr> conv_ptrs;

        if constexpr(ck::is_same_v<ck::remove_cv_t<InDataType>, float> &&
                     ck::is_same_v<ck::remove_cv_t<WeiDataType>, float> &&
                     ck::is_same_v<ck::remove_cv_t<OutDataType>, float>)
        {
            ck::tensor_operation::device::instance::
                add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f32_instances(conv_ptrs);
        }
        else if constexpr(ck::is_same_v<ck::remove_cv_t<InDataType>, ck::half_t> &&
                          ck::is_same_v<ck::remove_cv_t<WeiDataType>, ck::half_t> &&
                          ck::is_same_v<ck::remove_cv_t<OutDataType>, ck::half_t>)
        {
            ck::tensor_operation::device::instance::
                add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f16_instances(conv_ptrs);
        }
        else if constexpr(ck::is_same_v<ck::remove_cv_t<InDataType>, ck::bhalf_t> &&
                          ck::is_same_v<ck::remove_cv_t<WeiDataType>, ck::bhalf_t> &&
                          ck::is_same_v<ck::remove_cv_t<OutDataType>, ck::bhalf_t>)
        {
            ck::tensor_operation::device::instance::
                add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_bf16_instances(conv_ptrs);
        }
        else if constexpr(ck::is_same_v<ck::remove_cv_t<InDataType>, int8_t> &&
                          ck::is_same_v<ck::remove_cv_t<WeiDataType>, int8_t> &&
                          ck::is_same_v<ck::remove_cv_t<OutDataType>, int8_t>)
        {
            ck::tensor_operation::device::instance::
                add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_int8_instances(conv_ptrs);
        }

        if(conv_ptrs.size() <= 0)
        {
            throw std::runtime_error("wrong! no device Conv instance found");
        }

        // profile device Conv instances
        bool success = true;
        for(auto& conv_ptr : conv_ptrs)
        {
            auto argument_ptr = conv_ptr->MakeArgumentPointer(
                static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                N,
                K,
                C,
                input_spatial_lengths,
                filter_spatial_lengths,
                output_spatial_lengths,
                conv_filter_strides,
                conv_filter_dilations,
                input_left_pads,
                input_right_pads,
                InElementOp{},
                WeiElementOp{},
                OutElementOp{});

            if(conv_ptr->IsSupportedArgument(argument_ptr.get()))
            {
                auto invoker_ptr = conv_ptr->MakeInvokerPointer();
                invoker_ptr->Run(argument_ptr.get(), 1);

                in_device_buf.FromDevice(in_n_c_hi_wi_device_result.mData.data());

                if(!check_out(in_n_c_hi_wi_host_result, in_n_c_hi_wi_device_result))
                {
                    std::cout << "Fail Info: " << conv_ptr->GetTypeString() << std::endl;
                    success = false;
                }
                else
                {
                    std::cout << "Pass Info: " << conv_ptr->GetTypeString() << std::endl;
                }
            }
            else
            {
                std::cout << "Not support Info: " << conv_ptr->GetTypeString() << std::endl;
            }
        }

        if(success)
        {
            std::cout << "test conv2d bwd: Pass" << std::endl;
            return 0;
        }
        else
        {
            std::cout << "test conv2d bwd: Fail" << std::endl;
            return -1;
        }
    };

    if(data_type == 0)
    {
        return Run(F32(), F32(), F32(), F32());
    }
    else if(data_type == 1)
    {
        return Run(F16(), F16(), F16(), F32());
    }
    else if(data_type == 2)
    {
        return Run(BF16(), BF16(), BF16(), F32());
    }
    else if(data_type == 3)
    {
        return Run(INT8(), INT8(), INT8(), int());
    }
    else
    {
        return 1;
    }
}
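A note on the verification above: check_out accepts a device result only when every element is within an absolute 1e-6 of the reference, which fp16/bf16/int8 kernels generally cannot satisfy once accumulations grow. A minimal sketch of a mixed absolute/relative criterion is shown below; check_out_tolerant and its default tolerances are illustrative, not part of the repository:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: accept when |ref - res| <= atol + rtol * |ref| element-wise.
// atol/rtol would be chosen per data type (e.g. looser for fp16/bf16).
template <typename T>
bool check_out_tolerant(const std::vector<T>& ref,
                        const std::vector<T>& res,
                        double atol = 1e-6,
                        double rtol = 1e-3)
{
    if(ref.size() != res.size())
        return false;

    for(std::size_t i = 0; i < ref.size(); ++i)
    {
        const double r    = static_cast<double>(ref[i]);
        const double diff = std::abs(r - static_cast<double>(res[i]));

        if(diff > atol + rtol * std::abs(r))
            return false;
    }
    return true;
}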
test/conv2d_bwd_weight/CMakeLists.txt
deleted 100644 → 0  View file @ 127bf7f4
#add_test_executable(test_conv2d_bwd_weight conv2d_bwd_weight.cpp)
#target_link_libraries(test_conv2d_bwd_weight PRIVATE host_tensor device_conv2d_bwd_weight_instance conv_util)
test/conv2d_bwd_weight/conv2d_bwd_weight.cpp
deleted 100644 → 0  View file @ 127bf7f4
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <vector>

#include "test/convnd_fwd/conv_util.hpp"
#include "profiler/include/profile_conv_bwd_weight_impl.hpp"

int test_self()
{
    bool pass = true;

    std::vector<ck::utils::conv::ConvParams> params;
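    // Each ConvParams entry below is brace-initialized in the order
    // {num_dim_spatial, N, K, C, filter_spatial_lengths, input_spatial_lengths,
    //  conv_filter_strides, conv_filter_dilations, input_left_pads, input_right_pads}.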
    params.push_back({2, 128, 256, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}});
    params.push_back({2, 128, 256, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    params.push_back({2, 128, 256, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});

    for(auto& param : params)
    {
        // f32
        pass &= ck::profiler::profile_conv_bwd_weight_impl<
            2,
            float,
            float,
            float,
            ck::tensor_layout::convolution::NHWC,
            ck::tensor_layout::convolution::KYXC,
            ck::tensor_layout::convolution::NHWK>(true,  // do_verification
                                                  1,     // init_method
                                                  false, // do_log
                                                  false, // time_kernel
                                                  param.N_,
                                                  param.K_,
                                                  param.C_,
                                                  param.input_spatial_lengths_,
                                                  param.filter_spatial_lengths_,
                                                  param.GetOutputSpatialLengths(),
                                                  param.conv_filter_strides_,
                                                  param.conv_filter_dilations_,
                                                  param.input_left_pads_,
                                                  param.input_right_pads_,
                                                  2);

        // fp16
        pass &= ck::profiler::profile_conv_bwd_weight_impl<
            2,
            ck::half_t,
            ck::half_t,
            ck::half_t,
            ck::tensor_layout::convolution::NHWC,
            ck::tensor_layout::convolution::KYXC,
            ck::tensor_layout::convolution::NHWK>(true,  // do_verification
                                                  1,     // init_method
                                                  false, // do_log
                                                  false, // time_kernel
                                                  param.N_,
                                                  param.K_,
                                                  param.C_,
                                                  param.input_spatial_lengths_,
                                                  param.filter_spatial_lengths_,
                                                  param.GetOutputSpatialLengths(),
                                                  param.conv_filter_strides_,
                                                  param.conv_filter_dilations_,
                                                  param.input_left_pads_,
                                                  param.input_right_pads_,
                                                  2);
    }

    return pass;
}
int main(int argc, char* argv[])
{
    int data_type   = 1;
    int init_method = 1;

    // Conv shape
    ck::index_t N               = 128;
    ck::index_t K               = 256;
    ck::index_t C               = 192;
    ck::index_t Y               = 3;
    ck::index_t X               = 3;
    ck::index_t Hi              = 71;
    ck::index_t Wi              = 71;
    ck::index_t conv_stride_h   = 2;
    ck::index_t conv_stride_w   = 2;
    ck::index_t conv_dilation_h = 1;
    ck::index_t conv_dilation_w = 1;
    ck::index_t in_left_pad_h   = 1;
    ck::index_t in_left_pad_w   = 1;
    ck::index_t in_right_pad_h  = 1;
    ck::index_t in_right_pad_w  = 1;
    ck::index_t split_k         = 1;

    bool pass = true;

    if(argc == 1)
    {
        pass = test_self();
    }
    else
    {
        if(argc == 3)
        {
            data_type   = std::stoi(argv[1]);
            init_method = std::stoi(argv[2]);
        }
        else if(argc == 19)
        {
            data_type   = std::stoi(argv[1]);
            init_method = std::stoi(argv[2]);

            N               = std::stoi(argv[3]);
            K               = std::stoi(argv[4]);
            C               = std::stoi(argv[5]);
            Y               = std::stoi(argv[6]);
            X               = std::stoi(argv[7]);
            Hi              = std::stoi(argv[8]);
            Wi              = std::stoi(argv[9]);
            conv_stride_h   = std::stoi(argv[10]);
            conv_stride_w   = std::stoi(argv[11]);
            conv_dilation_h = std::stoi(argv[12]);
            conv_dilation_w = std::stoi(argv[13]);
            in_left_pad_h   = std::stoi(argv[14]);
            in_left_pad_w   = std::stoi(argv[15]);
            in_right_pad_h  = std::stoi(argv[16]);
            in_right_pad_w  = std::stoi(argv[17]);
            split_k         = std::stoi(argv[18]);
        }
        else
        {
            printf("arg1: data type (0=fp32, 1=fp16, 2=bfp16, 3=int8_t)\n");
            printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
            printf("arg3 to 17: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
                   "RightPx\n");
            printf("arg18: split_k\n");
            exit(1);
        }

        ck::utils::conv::ConvParams param{2,
                                          N,
                                          K,
                                          C,
                                          {Y, X},
                                          {Hi, Wi},
                                          {conv_stride_h, conv_stride_w},
                                          {conv_dilation_h, conv_dilation_w},
                                          {in_left_pad_h, in_left_pad_w},
                                          {in_right_pad_h, in_right_pad_w}};

        if(data_type == 0)
        {
            pass = ck::profiler::profile_conv_bwd_weight_impl<
                2,
                float,
                float,
                float,
                ck::tensor_layout::convolution::NHWC,
                ck::tensor_layout::convolution::KYXC,
                ck::tensor_layout::convolution::NHWK>(true, // do_verification
                                                      init_method,
                                                      false, // do_log
                                                      false, // time_kernel
                                                      param.N_,
                                                      param.K_,
                                                      param.C_,
                                                      param.input_spatial_lengths_,
                                                      param.filter_spatial_lengths_,
                                                      param.GetOutputSpatialLengths(),
                                                      param.conv_filter_strides_,
                                                      param.conv_filter_dilations_,
                                                      param.input_left_pads_,
                                                      param.input_right_pads_,
                                                      split_k);
        }
        else if(data_type == 1)
        {
            pass = ck::profiler::profile_conv_bwd_weight_impl<
                2,
                ck::half_t,
                ck::half_t,
                ck::half_t,
                ck::tensor_layout::convolution::NHWC,
                ck::tensor_layout::convolution::KYXC,
                ck::tensor_layout::convolution::NHWK>(true, // do_verification
                                                      init_method,
                                                      false, // do_log
                                                      false, // time_kernel
                                                      param.N_,
                                                      param.K_,
                                                      param.C_,
                                                      param.input_spatial_lengths_,
                                                      param.filter_spatial_lengths_,
                                                      param.GetOutputSpatialLengths(),
                                                      param.conv_filter_strides_,
                                                      param.conv_filter_dilations_,
                                                      param.input_left_pads_,
                                                      param.input_right_pads_,
                                                      split_k);
        }
        else
        {
            std::cout << "Not supported data type" << std::endl;
            return 1;
        }
    }

    if(pass)
    {
        std::cout << "test conv2d bwd weight: Pass" << std::endl;
        return 0;
    }
    else
    {
        std::cout << "test conv2d bwd weight: Fail" << std::endl;
        return -1;
    }
}
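The extra argv[18] parameter, split_k, partitions the reduction dimension of the weight-gradient implicit GEMM (roughly the N * Ho * Wo axis) across workgroups, trading partial-sum traffic for parallelism. One way to compare settings is to sweep it with the same profiler entry point; the sketch below reuses the fp16 instantiation and the pass/init_method/param variables from the deleted test above, and is illustrative only:

// Sketch: sweep split_k with otherwise identical arguments.
for(ck::index_t split_k : {1, 2, 4, 8})
{
    pass &= ck::profiler::profile_conv_bwd_weight_impl<
        2,
        ck::half_t,
        ck::half_t,
        ck::half_t,
        ck::tensor_layout::convolution::NHWC,
        ck::tensor_layout::convolution::KYXC,
        ck::tensor_layout::convolution::NHWK>(true, // do_verification
                                              init_method,
                                              false, // do_log
                                              true,  // time_kernel
                                              param.N_,
                                              param.K_,
                                              param.C_,
                                              param.input_spatial_lengths_,
                                              param.filter_spatial_lengths_,
                                              param.GetOutputSpatialLengths(),
                                              param.conv_filter_strides_,
                                              param.conv_filter_dilations_,
                                              param.input_left_pads_,
                                              param.input_right_pads_,
                                              split_k);
}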
test/conv_util/CMakeLists.txt
View file @ a1841d55

 add_gtest_executable(test_conv_util conv_util.cpp)
-target_link_libraries(test_conv_util PRIVATE host_tensor conv_util)
+target_link_libraries(test_conv_util PRIVATE utility)
test/conv_util/conv_util.cpp
View file @ a1841d55

@@ -10,198 +10,147 @@
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
 #include "ck/library/utility/check_err.hpp"
-#include "ck/library/utility/conv_util.hpp"
+#include "ck/library/utility/convolution_parameter.hpp"
 namespace {

 class TestConvUtil : public ::testing::Test
 {
     public:
-    void SetNDParams(std::size_t ndims)
+    void SetNDParams(std::size_t ndims, std::size_t s, std::size_t d, std::size_t p)
     {
-        conv_params.num_dim_spatial_        = ndims;
-        conv_params.filter_spatial_lengths_ = std::vector<ck::index_t>(ndims, 3);
-        conv_params.input_spatial_lengths_  = std::vector<ck::index_t>(ndims, 71);
-        conv_params.conv_filter_strides_    = std::vector<ck::index_t>(ndims, 2);
-        conv_params.conv_filter_dilations_  = std::vector<ck::index_t>(ndims, 1);
-        conv_params.input_left_pads_        = std::vector<ck::index_t>(ndims, 1);
-        conv_params.input_right_pads_       = std::vector<ck::index_t>(ndims, 1);
+        conv_params = ck::utils::conv::ConvParam(ndims,
+                                                 2,
+                                                 128,
+                                                 192,
+                                                 256,
+                                                 std::vector<ck::index_t>(ndims, 3),
+                                                 std::vector<ck::index_t>(ndims, 71),
+                                                 std::vector<ck::index_t>(ndims, s),
+                                                 std::vector<ck::index_t>(ndims, d),
+                                                 std::vector<ck::index_t>(ndims, p),
+                                                 std::vector<ck::index_t>(ndims, p));
     }

     protected:
-    // ------- default 2D -------
-    // input NCHW {128,192,71,71},
-    // weights KCYX {256,192,3,3},
-    // stride {2,2},
-    // dilations {1,1},
-    // padding {{1,1}, {1,1}}
-    ck::utils::conv::ConvParams conv_params;
+    // input GNCHW {2, 128, 192, 71, 71},
+    // weights GKCYX {2, 256, 192, 3, 3},
+    // stride {s, s},
+    // dilations {d, d},
+    // padding {{p, p}, {p, p}}
+    ck::utils::conv::ConvParam conv_params;
 };

 } // namespace
-TEST_F(TestConvUtil, ConvParamsGetOutputSpatialLengths2D)
+TEST_F(TestConvUtil, ConvParamsGetOutputSpatialLengths1D)
 {
-    ck::utils::conv::ConvParams conv_params;
+    // stride 2, dilation 1, pad 1
+    SetNDParams(1, 2, 1, 1);

     std::vector<ck::index_t> out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{36, 36}, "Error: ConvParams 2D default constructor."));
+        out_spatial_len, std::vector<ck::index_t>{36}, "Error: ConvParams 1D."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{1, 1};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 1, dilation 1, pad 1
+    SetNDParams(1, 1, 1, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{71, 71}, "Error: ConvParams 2D stride {1,1}."));
+        out_spatial_len, std::vector<ck::index_t>{71}, "Error: ConvParams 1D stride {1}."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{2, 2};
-    conv_params.input_left_pads_     = std::vector<ck::index_t>{2, 2};
-    conv_params.input_right_pads_    = std::vector<ck::index_t>{2, 2};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 1, pad 2
+    SetNDParams(1, 2, 1, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{37, 37}, "Error: ConvParams 2D padding left/right {2,2}."));
+        out_spatial_len, std::vector<ck::index_t>{37}, "Error: ConvParams 1D padding left/right {2}."));

-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2, 2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 2, pad 2
+    SetNDParams(1, 2, 2, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{36, 36}, "Error: ConvParams 2D dilation {2,2}."));
+        out_spatial_len, std::vector<ck::index_t>{36}, "Error: ConvParams 1D dilation {2}."));

-    conv_params.conv_filter_strides_   = std::vector<ck::index_t>{3, 3};
-    conv_params.input_left_pads_       = std::vector<ck::index_t>{1, 1};
-    conv_params.input_right_pads_      = std::vector<ck::index_t>{1, 1};
-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2, 2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 3, dilation 2, pad 1
+    SetNDParams(1, 3, 2, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{23, 23}, "Error: ConvParams 2D strides{3,3}, padding {1,1}, dilations {2,2}."));
+        out_spatial_len, std::vector<ck::index_t>{23}, "Error: ConvParams 1D strides{3}, padding {1}, dilations {2}."));
 }
-TEST_F(TestConvUtil, ConvParamsGetOutputSpatialLengths1D)
+TEST_F(TestConvUtil, ConvParamsGetOutputSpatialLengths2D)
 {
-    SetNDParams(1);
+    // stride 2, dilation 1, pad 1
+    SetNDParams(2, 2, 1, 1);

     std::vector<ck::index_t> out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{36}, "Error: ConvParams 1D."));
+        out_spatial_len, std::vector<ck::index_t>{36, 36}, "Error: ConvParams 2D default constructor."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{1};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 1, dilation 1, pad 1
+    SetNDParams(2, 1, 1, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{71}, "Error: ConvParams 1D stride {1}."));
+        out_spatial_len, std::vector<ck::index_t>{71, 71}, "Error: ConvParams 2D stride {1,1}."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{2};
-    conv_params.input_left_pads_     = std::vector<ck::index_t>{2};
-    conv_params.input_right_pads_    = std::vector<ck::index_t>{2};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 1, pad 2
+    SetNDParams(2, 2, 1, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{37}, "Error: ConvParams 1D padding left/right {2}."));
+        out_spatial_len, std::vector<ck::index_t>{37, 37}, "Error: ConvParams 2D padding left/right {2,2}."));

-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 2, pad 2
+    SetNDParams(2, 2, 2, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{36}, "Error: ConvParams 1D dilation {2}."));
+        out_spatial_len, std::vector<ck::index_t>{36, 36}, "Error: ConvParams 2D dilation {2,2}."));

-    conv_params.conv_filter_strides_   = std::vector<ck::index_t>{3};
-    conv_params.input_left_pads_       = std::vector<ck::index_t>{1};
-    conv_params.input_right_pads_      = std::vector<ck::index_t>{1};
-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 3, dilation 2, pad 1
+    SetNDParams(2, 3, 2, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
-        out_spatial_len, std::vector<ck::index_t>{23}, "Error: ConvParams 1D strides{3}, padding {1}, dilations {2}."));
+        out_spatial_len, std::vector<ck::index_t>{23, 23}, "Error: ConvParams 2D strides{3,3}, padding {1,1}, dilations {2,2}."));
 }
 TEST_F(TestConvUtil, ConvParamsGetOutputSpatialLengths3D)
 {
-    SetNDParams(3);
+    // stride 2, dilation 1, pad 1
+    SetNDParams(3, 2, 1, 1);

     std::vector<ck::index_t> out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
         out_spatial_len, std::vector<ck::index_t>{36, 36, 36}, "Error: ConvParams 3D."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{1, 1, 1};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 1, dilation 1, pad 1
+    SetNDParams(3, 1, 1, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
         out_spatial_len, std::vector<ck::index_t>{71, 71, 71}, "Error: ConvParams 3D stride {1, 1, 1}."));

-    conv_params.conv_filter_strides_ = std::vector<ck::index_t>{2, 2, 2};
-    conv_params.input_left_pads_     = std::vector<ck::index_t>{2, 2, 2};
-    conv_params.input_right_pads_    = std::vector<ck::index_t>{2, 2, 2};
-    out_spatial_len                  = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 1, pad 2
+    SetNDParams(3, 2, 1, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
         out_spatial_len, std::vector<ck::index_t>{37, 37, 37}, "Error: ConvParams 3D padding left/right {2, 2, 2}."));

-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2, 2, 2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 2, dilation 2, pad 2
+    SetNDParams(3, 2, 2, 2);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
         out_spatial_len, std::vector<ck::index_t>{36, 36, 36}, "Error: ConvParams 3D dilation {2, 2, 2}."));

-    conv_params.conv_filter_strides_   = std::vector<ck::index_t>{3, 3, 3};
-    conv_params.input_left_pads_       = std::vector<ck::index_t>{1, 1, 1};
-    conv_params.input_right_pads_      = std::vector<ck::index_t>{1, 1, 1};
-    conv_params.conv_filter_dilations_ = std::vector<ck::index_t>{2, 2, 2};
-    out_spatial_len                    = conv_params.GetOutputSpatialLengths();
+    // stride 3, dilation 2, pad 1
+    SetNDParams(3, 3, 2, 1);
+    out_spatial_len = conv_params.GetOutputSpatialLengths();
     EXPECT_TRUE(ck::utils::check_err(
         out_spatial_len, std::vector<ck::index_t>{23, 23, 23}, "Error: ConvParams 3D strides{3, 3, 3}, padding {1, 1, 1}, dilations {2, 2, 2}."));
 }
 TEST(ConvUtil, GetHostTensorDescriptor)
 {
     namespace tl = ck::tensor_layout::convolution;
     std::vector<std::size_t> dims{2, 3, 4, 5};
     HostTensorDescriptor h = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NHWC{});
     EXPECT_TRUE(ck::utils::check_err(
         h.GetLengths(), {2, 3, 4, 5}, "Error: wrong NHWC dimensions lengths!"));
     EXPECT_TRUE(ck::utils::check_err(
         h.GetStrides(), {3 * 4 * 5, 1, 3 * 5, 3}, "Error: wrong NHWC dimensions strides!"));

     h = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NCHW{});
     EXPECT_TRUE(ck::utils::check_err(
         h.GetLengths(), {2, 3, 4, 5}, "Error: wrong NCHW dimensions lengths!"));
     EXPECT_TRUE(ck::utils::check_err(
         h.GetStrides(), {3 * 4 * 5, 4 * 5, 5, 1}, "Error: wrong NCHW dimensions strides!"));

     dims = std::vector<std::size_t>{2, 3, 4};
     h    = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NWC{});
     EXPECT_TRUE(
         ck::utils::check_err(h.GetLengths(), {2, 3, 4}, "Error: wrong NWC dimensions lengths!"));
     EXPECT_TRUE(
         ck::utils::check_err(h.GetStrides(), {3 * 4, 1, 3}, "Error: wrong NWC dimensions strides!"));

     h = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NCW{});
     EXPECT_TRUE(
         ck::utils::check_err(h.GetLengths(), {2, 3, 4}, "Error: wrong NCW dimensions lengths!"));
     EXPECT_TRUE(
         ck::utils::check_err(h.GetStrides(), {3 * 4, 4, 1}, "Error: wrong NCW dimensions strides!"));

     dims = std::vector<std::size_t>{2, 3, 4, 5, 6};
     h    = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NDHWC{});
     EXPECT_TRUE(
         ck::utils::check_err(h.GetLengths(), dims, "Error: wrong NDHWC dimensions lengths!"));
     EXPECT_TRUE(ck::utils::check_err(h.GetStrides(),
                                      {3 * 4 * 5 * 6, // N
                                       1,             // C
                                       3 * 5 * 6,     // D
                                       3 * 6,         // H
                                       3},            // W
                                      "Error: wrong NDHWC dimensions strides!"));

     h = ck::utils::conv::get_host_tensor_descriptor(dims, tl::NCDHW{});
     EXPECT_TRUE(
         ck::utils::check_err(h.GetLengths(), dims, "Error: wrong NCDHW dimensions lengths!"));
     EXPECT_TRUE(ck::utils::check_err(h.GetStrides(),
                                      {3 * 4 * 5 * 6, // N
                                       4 * 5 * 6,     // C
                                       5 * 6,         // D
                                       6,             // H
                                       1},            // W
                                      "Error: wrong NCDHW dimensions strides!"));
 }
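Every expected length in the spatial-length tests above follows from the standard convolution output-size formula out = (in + pad_left + pad_right - dilation * (filter - 1) - 1) / stride + 1 with integer division. A tiny standalone helper reproduces the 36/71/37/36/23 values used with the 71-wide input and 3-wide filter; illustrative code, not part of the commit:

#include <cassert>

// out = (in + pad_l + pad_r - dilation * (filter - 1) - 1) / stride + 1
int conv_out_length(int in, int filter, int stride, int dilation, int pad_l, int pad_r)
{
    return (in + pad_l + pad_r - dilation * (filter - 1) - 1) / stride + 1;
}

int main()
{
    assert(conv_out_length(71, 3, 2, 1, 1, 1) == 36); // stride 2, dilation 1, pad 1
    assert(conv_out_length(71, 3, 1, 1, 1, 1) == 71); // stride 1
    assert(conv_out_length(71, 3, 2, 1, 2, 2) == 37); // pad 2
    assert(conv_out_length(71, 3, 2, 2, 2, 2) == 36); // dilation 2, pad 2
    assert(conv_out_length(71, 3, 3, 2, 1, 1) == 23); // stride 3, dilation 2, pad 1
    return 0;
}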
test/convnd_bwd_data/CMakeLists.txt
View file @ a1841d55

-add_test_executable(test_convnd_bwd_data convnd_bwd_data.cpp)
-target_link_libraries(test_convnd_bwd_data PRIVATE host_tensor device_convnd_bwd_data_instance conv_util)
+add_gtest_executable(test_convnd_bwd_data convnd_bwd_data.cpp)
+target_link_libraries(test_convnd_bwd_data PRIVATE utility device_conv1d_bwd_data_instance
+                      device_conv2d_bwd_data_instance device_conv3d_bwd_data_instance)