gaoqiong / composable_kernel · Commits · 4698993d

Unverified commit 4698993d, authored Nov 15, 2022 by Po Yen Chen, committed by GitHub on Nov 15, 2022

Merge branch 'develop' into wmma_op

Parents: ab663329, 7038723a
Changes: 202 files in the full commit; this page shows 20 changed files with 73 additions and 536 deletions (+73 −536).
example/30_grouped_convnd_fwd_bias_relu_add/grouped_convnd_fwd_bias_relu_add_xdl_int8.cpp  +0 −459
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_bf16.cpp  +1 −0
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp16.cpp  +1 −0
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp32.cpp  +1 −0
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int4.cpp  +1 −0
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int8.cpp  +1 −0
example/31_batched_gemm_gemm/run_batched_gemm_gemm_example.inc  +5 −5
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_lower_triangle_scale_softmax_gemm_permute_xdl_fp16.cpp  +1 −0
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_scale_softmax_gemm_permute_xdl_fp16.cpp  +1 −0
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_scale_softmax_gemm_xdl_fp16.cpp  +6 −5
example/32_batched_gemm_scale_softmax_gemm/grouped_gemm_scale_softmax_gemm_permute_xdl_fp16.cpp  +1 −0
example/32_batched_gemm_scale_softmax_gemm/run_batched_gemm_scale_softmax_gemm_permute.inc  +2 −2
example/32_batched_gemm_scale_softmax_gemm/run_grouped_gemm_scale_softmax_gemm_permute.inc  +12 −12
example/33_multiple_reduce/dual_reduce_common.hpp  +7 −6
example/34_batchnorm/batchnorm_forward_nhwc.cpp  +10 −16
example/34_batchnorm/batchnorm_infer_nhwc.cpp  +6 −9
example/35_splitK_gemm/run_splitK_gemm_example.inc  +7 −10
example/36_sparse_embedding/sparse_embedding3_forward_layernorm.cpp  +3 −6
example/37_batched_gemm_add_add_relu_gemm_add/batched_gemm_add_add_relu_gemm_add_xdl_fp16.cpp  +6 −6
example/38_grouped_conv_bwd_data_multiple_d/common.hpp  +1 −0
example/30_grouped_convnd_fwd_bias_relu_add/grouped_convnd_fwd_bias_relu_add_xdl_int8.cpp — deleted (file mode 100644 → 0)
```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "grouped_convnd_fwd_bias_relu_add_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"

// kernel data types
using InKernelDataType       = int8_t;
using WeiKernelDataType      = int8_t;
using AccDataType            = int32_t;
using CShuffleDataType       = int8_t;
using BiasKernelDataType     = int8_t;
using ResidualKernelDataType = int8_t;
using OutKernelDataType      = int8_t;

// tensor data types
using InUserDataType  = InKernelDataType;
using WeiUserDataType = WeiKernelDataType;
using OutUserDataType = OutKernelDataType;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::AddReluAdd;

static constexpr auto ConvSpec =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename BiasLayout,
          typename ResidualLayout,
          typename OutLayout>
using DeviceGroupedConvNDFwdInstance =
    ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<
        NDimSpatial,
        InLayout,
        WeiLayout,
        ck::Tuple<BiasLayout, ResidualLayout>,
        OutLayout,
        InKernelDataType,
        WeiKernelDataType,
        AccDataType,
        CShuffleDataType,
        ck::Tuple<BiasKernelDataType, ResidualKernelDataType>,
        OutKernelDataType,
        InElementOp,
        WeiElementOp,
        OutElementOp,
        ConvSpec,       // ConvForwardSpecialization
        GemmSpec,       // GemmSpecialization
        1,              //
        256,            // BlockSize
        128,            // MPerBlock
        256,            // NPerBlock
        64,             // KPerBlock
        16,             // AK1
        16,             // BK1
        32,             // MPerXdl
        32,             // NPerXdl
        2,              // MXdlPerWave
        4,              // NXdlPerWave
        S<4, 64, 1>,    // ABlockTransferThreadClusterLengths_AK0_M_AK1
        S<1, 0, 2>,     // ABlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,     // ABlockTransferSrcAccessOrder
        2,              // ABlockTransferSrcVectorDim
        16,             // ABlockTransferSrcScalarPerVector
        16,             // ABlockTransferDstScalarPerVector_AK1
        1,              // ABlockLdsExtraM
        S<4, 64, 1>,    // BBlockTransferThreadClusterLengths_BK0_N_BK1
        S<1, 0, 2>,     // BBlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,     // BBlockTransferSrcAccessOrder
        2,              // BBlockTransferSrcVectorDim
        16,             // BBlockTransferSrcScalarPerVector
        16,             // BBlockTransferDstScalarPerVector_BK1
        1,              // BBlockLdsExtraN
        1,
        1,
        S<1, 64, 1, 4>,
        16>;

int main(int argc, char* argv[])
{
    namespace ctc = ck::tensor_layout::convolution;

    print_helper_msg();

    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // conventional group conv definition
    // G = 2
    // [N, C, Hi, Wi] = [128, 384, 71, 71]
    // [K, C, Y, X] = [512, 192, 3, 3]
    // [N, K, Ho, Wo] = [128, 512, 36, 36]
    //
    // CK group conv definition
    // [G, N, C, Hi, Wi] = [2, 128, 192, 71, 71]
    // [G, K, C, Y, X] = [2, 256, 192, 3, 3]
    // [G, N, K, Ho, Wo] = [2, 128, 256, 36, 36]
    ck::utils::conv::ConvParam conv_param{
        2, 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};

    if(argc == 1)
    {
        // use default
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        const ck::index_t num_dim_spatial = std::stoi(argv[4]);

        conv_param = ck::utils::conv::parse_conv_param(num_dim_spatial, 5, argv);
    }

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{};

    if(conv_param.num_dim_spatial_ == 1)
    {
        using InLayout       = ctc::G_NW_C;
        using WeiLayout      = ctc::G_K_X_C;
        using BiasLayout     = ctc::G_K;
        using ResidualLayout = ctc::G_NW_K;
        using OutLayout      = ctc::G_NW_K;

        const auto in_g_n_c_wis_desc = HostTensorDescriptor(
            {conv_param.G_, conv_param.N_, conv_param.C_, conv_param.input_spatial_lengths_[0]},
            {
                conv_param.C_,                                                        // g
                conv_param.input_spatial_lengths_[0] * conv_param.G_ * conv_param.C_, // n
                1,                                                                    // c
                conv_param.G_ * conv_param.C_                                         // wi
            });

        const auto wei_g_k_c_xs_desc = HostTensorDescriptor(
            {conv_param.G_, conv_param.K_, conv_param.C_, conv_param.filter_spatial_lengths_[0]},
            {
                conv_param.K_ * conv_param.filter_spatial_lengths_[0] * conv_param.C_, // g
                conv_param.filter_spatial_lengths_[0] * conv_param.C_,                 // k
                1,                                                                     // c
                conv_param.C_                                                          // x
            });

        const auto bias_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_, conv_param.N_, conv_param.K_, conv_param.output_spatial_lengths_[0]},
            {
                conv_param.K_, // g
                0,             // k
                1,             // c
                0              // x
            });

        const auto residual_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_, conv_param.N_, conv_param.K_, conv_param.output_spatial_lengths_[0]},
            {
                conv_param.K_, // g
                0,             // k
                1,             // c
                0              // x
            });

        const auto out_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_, conv_param.N_, conv_param.K_, conv_param.output_spatial_lengths_[0]},
            {
                conv_param.K_,                                                         // g
                conv_param.output_spatial_lengths_[0] * conv_param.G_ * conv_param.K_, // n
                1,                                                                     // k
                conv_param.G_ * conv_param.K_                                          // wo
            });

        return run_grouped_conv_fwd_bias_relu_add<
            1,
            InKernelDataType,
            WeiKernelDataType,
            CShuffleDataType,
            OutKernelDataType,
            InElementOp,
            WeiElementOp,
            OutElementOp,
            InUserDataType,
            WeiUserDataType,
            OutUserDataType,
            DeviceGroupedConvNDFwdInstance<1,
                                           InLayout,
                                           WeiLayout,
                                           BiasLayout,
                                           ResidualLayout,
                                           OutLayout>>(do_verification,
                                                       init_method,
                                                       time_kernel,
                                                       conv_param,
                                                       in_g_n_c_wis_desc,
                                                       wei_g_k_c_xs_desc,
                                                       bias_g_n_k_wos_desc,
                                                       residual_g_n_k_wos_desc,
                                                       out_g_n_k_wos_desc,
                                                       in_element_op,
                                                       wei_element_op,
                                                       out_element_op);
    }
    else if(conv_param.num_dim_spatial_ == 2)
    {
        using InLayout       = ctc::G_NHW_C;
        using WeiLayout      = ctc::G_K_YX_C;
        using BiasLayout     = ctc::G_K;
        using ResidualLayout = ctc::G_NHW_K;
        using OutLayout      = ctc::G_NHW_K;

        const auto in_g_n_c_wis_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.C_,
             conv_param.input_spatial_lengths_[0],
             conv_param.input_spatial_lengths_[1]},
            {
                conv_param.C_, // g
                conv_param.input_spatial_lengths_[0] * conv_param.input_spatial_lengths_[1] *
                    conv_param.G_ * conv_param.C_,                                    // n
                1,                                                                    // c
                conv_param.input_spatial_lengths_[1] * conv_param.G_ * conv_param.C_, // hi
                conv_param.G_ * conv_param.C_                                         // wi
            });

        const auto wei_g_k_c_xs_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.K_,
             conv_param.C_,
             conv_param.filter_spatial_lengths_[0],
             conv_param.filter_spatial_lengths_[1]},
            {
                conv_param.K_ * conv_param.filter_spatial_lengths_[0] *
                    conv_param.filter_spatial_lengths_[1] * conv_param.C_, // g
                conv_param.filter_spatial_lengths_[0] * conv_param.filter_spatial_lengths_[1] *
                    conv_param.C_,                                         // k
                1,                                                         // c
                conv_param.filter_spatial_lengths_[1] * conv_param.C_,     // y
                conv_param.C_                                              // x
            });

        const auto bias_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1]},
            {
                conv_param.K_, // g
                0,             // n
                1,             // k
                0,             // ho
                0              // wo
            });

        const auto residual_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1]},
            {
                conv_param.K_, // g
                0,             // n
                1,             // k
                0,             // ho
                0              // wo
            });

        const auto out_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1]},
            {
                conv_param.K_, // g
                conv_param.output_spatial_lengths_[0] * conv_param.output_spatial_lengths_[1] *
                    conv_param.G_ * conv_param.K_,                                     // n
                1,                                                                     // k
                conv_param.output_spatial_lengths_[1] * conv_param.G_ * conv_param.K_, // ho
                conv_param.G_ * conv_param.K_                                          // wo
            });

        return run_grouped_conv_fwd_bias_relu_add<
            2,
            InKernelDataType,
            WeiKernelDataType,
            CShuffleDataType,
            OutKernelDataType,
            InElementOp,
            WeiElementOp,
            OutElementOp,
            InUserDataType,
            WeiUserDataType,
            OutUserDataType,
            DeviceGroupedConvNDFwdInstance<2,
                                           InLayout,
                                           WeiLayout,
                                           BiasLayout,
                                           ResidualLayout,
                                           OutLayout>>(do_verification,
                                                       init_method,
                                                       time_kernel,
                                                       conv_param,
                                                       in_g_n_c_wis_desc,
                                                       wei_g_k_c_xs_desc,
                                                       bias_g_n_k_wos_desc,
                                                       residual_g_n_k_wos_desc,
                                                       out_g_n_k_wos_desc,
                                                       in_element_op,
                                                       wei_element_op,
                                                       out_element_op);
    }
    else if(conv_param.num_dim_spatial_ == 3)
    {
        using InLayout       = ctc::G_NDHW_C;
        using WeiLayout      = ctc::G_K_ZYX_C;
        using BiasLayout     = ctc::G_K;
        using ResidualLayout = ctc::G_NDHW_K;
        using OutLayout      = ctc::G_NDHW_K;

        const auto in_g_n_c_wis_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.C_,
             conv_param.input_spatial_lengths_[0],
             conv_param.input_spatial_lengths_[1],
             conv_param.input_spatial_lengths_[2]},
            {
                conv_param.C_, // g
                conv_param.input_spatial_lengths_[0] * conv_param.input_spatial_lengths_[1] *
                    conv_param.input_spatial_lengths_[2] * conv_param.G_ * conv_param.C_, // n
                1,                                                                        // c
                conv_param.input_spatial_lengths_[1] * conv_param.input_spatial_lengths_[2] *
                    conv_param.G_ * conv_param.C_,                                    // di
                conv_param.input_spatial_lengths_[2] * conv_param.G_ * conv_param.C_, // hi
                conv_param.G_ * conv_param.C_                                         // wi
            });

        const auto wei_g_k_c_xs_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.K_,
             conv_param.C_,
             conv_param.filter_spatial_lengths_[0],
             conv_param.filter_spatial_lengths_[1],
             conv_param.filter_spatial_lengths_[2]},
            {
                conv_param.K_ * conv_param.filter_spatial_lengths_[0] *
                    conv_param.filter_spatial_lengths_[1] * conv_param.filter_spatial_lengths_[2] *
                    conv_param.C_, // g
                conv_param.filter_spatial_lengths_[0] * conv_param.filter_spatial_lengths_[1] *
                    conv_param.filter_spatial_lengths_[2] * conv_param.C_, // k
                1,                                                         // c
                conv_param.filter_spatial_lengths_[1] * conv_param.filter_spatial_lengths_[2] *
                    conv_param.C_,                                         // z
                conv_param.filter_spatial_lengths_[2] * conv_param.C_,     // y
                conv_param.C_                                              // x
            });

        const auto bias_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1],
             conv_param.output_spatial_lengths_[2]},
            {
                conv_param.K_, // g
                0,             // n
                1,             // k
                0,             // z
                0,             // y
                0              // x
            });

        const auto residual_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1],
             conv_param.output_spatial_lengths_[2]},
            {
                conv_param.K_, // g
                0,             // n
                1,             // k
                0,             // z
                0,             // y
                0              // x
            });

        const auto out_g_n_k_wos_desc = HostTensorDescriptor(
            {conv_param.G_,
             conv_param.N_,
             conv_param.K_,
             conv_param.output_spatial_lengths_[0],
             conv_param.output_spatial_lengths_[1],
             conv_param.output_spatial_lengths_[2]},
            {
                conv_param.K_, // g
                conv_param.output_spatial_lengths_[0] * conv_param.output_spatial_lengths_[1] *
                    conv_param.output_spatial_lengths_[2] * conv_param.G_ * conv_param.K_, // n
                1,                                                                         // k
                conv_param.output_spatial_lengths_[1] * conv_param.output_spatial_lengths_[2] *
                    conv_param.G_ * conv_param.K_,                                     // do
                conv_param.output_spatial_lengths_[2] * conv_param.G_ * conv_param.K_, // ho
                conv_param.G_ * conv_param.K_                                          // wo
            });

        return run_grouped_conv_fwd_bias_relu_add<
            3,
            InKernelDataType,
            WeiKernelDataType,
            CShuffleDataType,
            OutKernelDataType,
            InElementOp,
            WeiElementOp,
            OutElementOp,
            InUserDataType,
            WeiUserDataType,
            OutUserDataType,
            DeviceGroupedConvNDFwdInstance<3,
                                           InLayout,
                                           WeiLayout,
                                           BiasLayout,
                                           ResidualLayout,
                                           OutLayout>>(do_verification,
                                                       init_method,
                                                       time_kernel,
                                                       conv_param,
                                                       in_g_n_c_wis_desc,
                                                       wei_g_k_c_xs_desc,
                                                       bias_g_n_k_wos_desc,
                                                       residual_g_n_k_wos_desc,
                                                       out_g_n_k_wos_desc,
                                                       in_element_op,
                                                       wei_element_op,
                                                       out_element_op);
    }

    return 0;
}
```
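The comment block near the top of main() maps a conventional grouped convolution onto CK's explicit-G tensors by dividing the channel totals across groups. A minimal sketch of that bookkeeping (the struct and arithmetic below are illustrative, not a CK API; only the numbers come from the example's defaults):

```cpp
#include <cstdio>

// Illustration of the conventional -> CK grouped-conv mapping from the
// comment block above. ConvShape and the helper math are ours; the numbers
// are the example's defaults (G = 2, N = 128, C = 384, K = 512, 3x3 filter).
struct ConvShape
{
    int G, N, C, K, Y, X; // conventional form: C and K are totals over all groups
};

int main()
{
    ConvShape conv{2, 128, 384, 512, 3, 3};

    const int c_per_group = conv.C / conv.G; // 384 / 2 = 192
    const int k_per_group = conv.K / conv.G; // 512 / 2 = 256

    // CK carries G explicitly and stores per-group channel counts:
    std::printf("[G, N, C, Hi, Wi] = [%d, %d, %d, 71, 71]\n", conv.G, conv.N, c_per_group);
    std::printf("[G, K, C, Y, X]   = [%d, %d, %d, %d, %d]\n",
                conv.G, k_per_group, c_per_group, conv.Y, conv.X);
    return 0;
}
```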
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_bf16.cpp  (+1 −0)

```diff
@@ -23,6 +23,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
```
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp16.cpp  (+1 −0)

```diff
@@ -23,6 +23,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
```
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp32.cpp  (+1 −0)

```diff
@@ -23,6 +23,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
```
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int4.cpp  (+1 −0)

```diff
@@ -27,6 +27,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
```
example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int8.cpp  (+1 −0)

```diff
@@ -23,6 +23,7 @@ Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
```
example/31_batched_gemm_gemm/run_batched_gemm_gemm_example.inc  (+5 −5)

```diff
@@ -106,15 +106,15 @@ bool run_batched_gemm_gemm_example(int argc, char* argv[])
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
+        using namespace ck::literals;
+
         if(std::is_same<decltype(layout), Row>::value)
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, stride, 1}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
         }
         else
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, 1, stride}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
         }
     };
@@ -270,7 +270,7 @@ bool run_batched_gemm_gemm_example(int argc, char* argv[])
         c_g_m_o_device_buf.FromDevice(c_g_m_o_device_result.mData.data());
 #endif

-        return ck::utils::check_err(c_g_m_o_device_result.mData, c_g_m_o_host_result.mData);
+        return ck::utils::check_err(c_g_m_o_device_result, c_g_m_o_host_result);
     }

     return true;
```
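The new `1_uz` spelling comes from the just-added `ck/library/utility/literals.hpp`. A sketch of what such a `std::size_t` literal looks like, assuming CK's definition follows the usual user-defined-literal pattern (this stand-in is not the library's actual code):

```cpp
#include <cstddef>
#include <type_traits>

// Sketch of a size_t literal like the `1_uz` used in the hunk above.
// The real definition lives in "ck/library/utility/literals.hpp".
namespace ck::literals {
constexpr std::size_t operator""_uz(unsigned long long v)
{
    return static_cast<std::size_t>(v);
}
} // namespace ck::literals

int main()
{
    using namespace ck::literals;
    // Keeps brace-init stride lists homogeneous in std::size_t, so
    // HostTensorDescriptor({...}, {batch_stride, stride, 1_uz}) deduces one
    // element type instead of mixing int literals with size_t values.
    static_assert(std::is_same_v<decltype(1_uz), std::size_t>);
    return 1_uz == 1 ? 0 : 1;
}
```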
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_lower_triangle_scale_softmax_gemm_permute_xdl_fp16.cpp  (+1 −0)

```diff
@@ -24,6 +24,7 @@ Gemm + Softmax + Gemm fused operation. Computes C_g_m_o = Softmax(A_g_m_k * B0_g
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
```
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_scale_softmax_gemm_permute_xdl_fp16.cpp  (+1 −0)

```diff
@@ -24,6 +24,7 @@ Gemm + Softmax + Gemm fused operation. Computes C_g_m_o = Softmax(A_g_m_k * B0_g
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
```
example/32_batched_gemm_scale_softmax_gemm/batched_gemm_scale_softmax_gemm_xdl_fp16.cpp  (+6 −5)

```diff
@@ -23,6 +23,7 @@ Gemm + Softmax + Gemm fused operation. Computes C_g_m_o = Softmax(A_g_m_k * B0_g
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
@@ -245,15 +246,15 @@ int main(int argc, char* argv[])
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
+        using namespace ck::literals;
+
         if(std::is_same<decltype(layout), Row>::value)
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, stride, 1}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
         }
         else
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, 1, stride}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
         }
     };
@@ -391,7 +392,7 @@ int main(int argc, char* argv[])
         ref_gemm1_invoker.Run(ref_gemm1_argument);

-        return ck::utils::check_err(c_g_m_o_device_result.mData, c_g_m_o_host_result.mData) ? 0 : 1;
+        return ck::utils::check_err(c_g_m_o_device_result, c_g_m_o_host_result) ? 0 : 1;
     }

     return 0;
```
example/32_batched_gemm_scale_softmax_gemm/grouped_gemm_scale_softmax_gemm_permute_xdl_fp16.cpp  (+1 −0)

```diff
@@ -24,6 +24,7 @@ Gemm + Softmax + Gemm fused operation. Computes C_g_m_o = Softmax(A_g_m_k * B0_g
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
```
example/32_batched_gemm_scale_softmax_gemm/run_batched_gemm_scale_softmax_gemm_permute.inc  (+2 −2)

Two hunks in `int run(int argc, char* argv[])` (@@ -22,7 +22,7 @@ and @@ -50,7 +50,7 @@). The old and new sides of this capture are token-identical, so the two modified lines differ only in whitespace/alignment. Context:

```cpp
float alpha = 1;

bool input_permute  = false;
bool output_permute = true;

if(argc == 1)
```

```cpp
alpha          = std::stof(argv[10]);
input_permute  = std::stoi(argv[11]);
output_permute = std::stoi(argv[12]);
}
else
```
example/32_batched_gemm_scale_softmax_gemm/run_grouped_gemm_scale_softmax_gemm_permute.inc  (+12 −12)

Four hunks in `int run(int argc, char* argv[])` (@@ -7,7 +7,7 @@, @@ -26,7 +26,7 @@, @@ -66,10 +66,10 @@, @@ -228,12 +228,12 @@). Every old/new pair in this capture is token-identical, so the twelve changed lines differ only in whitespace/alignment. Context:

```cpp
int init_method  = 1;
bool time_kernel = false;

bool input_permute  = false;
bool output_permute = true;

if(argc == 1)
```

```cpp
init_method    = std::stoi(argv[2]);
time_kernel    = std::stoi(argv[3]);
input_permute  = std::stoi(argv[4]);
output_permute = std::stoi(argv[5]);
}
else
```

```cpp
std::cout << "group count " << group_count << ". printing first 4 groups\n";
for(std::size_t i = 0; i < group_count; i++)
{
    int M  = 128 * (rand() % 8 + 1);
    int N  = 128 * (rand() % 8 + 1);
    int K  = 40;
    int O  = 40 * (rand() % 2 + 1);
    int G0 = rand() % 3 + 1;
    int G1 = rand() % 5 + 1;
```

```cpp
{
    for(std::size_t i = 0; i < group_count; i++)
    {
        const int& G0 = g0_g1_m_n_k_o[i][0];
        const int& G1 = g0_g1_m_n_k_o[i][1];
        const int& M  = g0_g1_m_n_k_o[i][2];
        const int& N  = g0_g1_m_n_k_o[i][3];
        const int& K  = g0_g1_m_n_k_o[i][4];
        const int& O  = g0_g1_m_n_k_o[i][5];

        const auto& c_gs_ms_os_lengths = problem_descs[i].c_gs_ms_os_lengths;
        const auto& c_gs_ms_os_strides = problem_descs[i].c_gs_ms_os_strides;
```
example/33_multiple_reduce/dual_reduce_common.hpp  (+7 −6)

```diff
@@ -12,6 +12,7 @@
 #include "ck/utility/reduction_enums.hpp"
 #include "ck/utility/data_type.hpp"

+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
@@ -253,10 +254,10 @@ int mean_meansquare_dual_reduce_test(size_t n,
     std::array<ck::index_t, NumOutputDim> i_outLengths;
     std::array<ck::index_t, NumOutputDim> i_outStrides;

-    std::copy(inLengths.begin(), inLengths.end(), i_inLengths.begin());
-    std::copy(inStrides.begin(), inStrides.end(), i_inStrides.begin());
-    std::copy(outLengths.begin(), outLengths.end(), i_outLengths.begin());
-    std::copy(outStrides.begin(), outStrides.end(), i_outStrides.begin());
+    ck::ranges::copy(inLengths, i_inLengths.begin());
+    ck::ranges::copy(inStrides, i_inStrides.begin());
+    ck::ranges::copy(outLengths, i_outLengths.begin());
+    ck::ranges::copy(outStrides, i_outStrides.begin());

     auto dual_reduce_op = DeviceDualReduce{};
@@ -305,8 +306,8 @@ int mean_meansquare_dual_reduce_test(size_t n,
     {
         mean_dev.FromDevice(mean.mData.data());
         meansquare_dev.FromDevice(meansquare.mData.data());

-        pass = pass && ck::utils::check_err(mean.mData, mean_ref.mData);
-        pass = pass && ck::utils::check_err(meansquare.mData, meansquare_ref.mData);
+        pass = pass && ck::utils::check_err(mean, mean_ref);
+        pass = pass && ck::utils::check_err(meansquare, meansquare_ref);
     };

     return (pass ? 0 : 1);
```
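`ck::ranges::copy` arrives with the newly included `ck/library/utility/algorithm.hpp`. A sketch of the likely shape of such a range-based wrapper over `std::copy` (an assumption about its form, not CK's actual implementation):

```cpp
#include <algorithm>
#include <array>
#include <iterator>
#include <vector>

// Sketch of a range-based copy in the spirit of the ck::ranges::copy used
// above; the real helper lives in "ck/library/utility/algorithm.hpp".
namespace ck::ranges {
template <typename Range, typename OutputIterator>
auto copy(const Range& range, OutputIterator iter)
{
    return std::copy(std::begin(range), std::end(range), iter);
}
} // namespace ck::ranges

int main()
{
    std::vector<int> inLengths{16, 8, 4};
    std::array<int, 3> i_inLengths{};
    // One range argument replaces the begin()/end() pair at every call site.
    ck::ranges::copy(inLengths, i_inLengths.begin());
    return i_inLengths[2] == 4 ? 0 : 1;
}
```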
example/34_batchnorm/batchnorm_forward_nhwc.cpp  (+10 −16)

```diff
@@ -9,6 +9,7 @@
 #include <getopt.h>

 #include "ck/ck.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
@@ -263,14 +264,10 @@ bool bnorm_fwd_nhwc_test(bool do_verification,
     std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarLengths;
     std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarStrides;

-    std::copy(inOutLengths.begin(), inOutLengths.end(), i_inOutLengths.begin());
-    std::copy(inOutStrides.begin(), inOutStrides.end(), i_inOutStrides.begin());
-    std::copy(scaleBiasMeanVarLengths.begin(),
-              scaleBiasMeanVarLengths.end(),
-              i_scaleBiasMeanVarLengths.begin());
-    std::copy(scaleBiasMeanVarStrides.begin(),
-              scaleBiasMeanVarStrides.end(),
-              i_scaleBiasMeanVarStrides.begin());
+    ck::ranges::copy(inOutLengths, i_inOutLengths.begin());
+    ck::ranges::copy(inOutStrides, i_inOutStrides.begin());
+    ck::ranges::copy(scaleBiasMeanVarLengths, i_scaleBiasMeanVarLengths.begin());
+    ck::ranges::copy(scaleBiasMeanVarStrides, i_scaleBiasMeanVarStrides.begin());

     using PassThroughOp = ck::tensor_operation::element_wise::PassThrough;
@@ -413,7 +410,7 @@ bool bnorm_fwd_nhwc_test(bool do_verification,
         (void)invoker_ptr_ref->Run(argument_ptr_ref.get());

         y_dev.FromDevice(y.mData.data());
-        pass = pass && ck::utils::check_err(y.mData, y_ref.mData);
+        pass = pass && ck::utils::check_err(y, y_ref);

         if(updateMovingAverage)
         {
@@ -423,10 +420,8 @@ bool bnorm_fwd_nhwc_test(bool do_verification,
             resultRunningMean_dev.FromDevice(resultRunningMean.mData.data());
             resultRunningVariance_dev.FromDevice(resultRunningVariance.mData.data());

-            pass = pass &&
-                   ck::utils::check_err(resultRunningMean.mData, resultRunningMean_ref.mData);
-            pass = pass && ck::utils::check_err(resultRunningVariance.mData,
-                                                resultRunningVariance_ref.mData);
+            pass = pass && ck::utils::check_err(resultRunningMean, resultRunningMean_ref);
+            pass = pass && ck::utils::check_err(resultRunningVariance, resultRunningVariance_ref);
         };

         if(saveMeanAndInvVariance)
@@ -439,9 +434,8 @@ bool bnorm_fwd_nhwc_test(bool do_verification,
             resultSaveMean_dev.FromDevice(resultSaveMean.mData.data());
             resultSaveInvVariance_dev.FromDevice(resultSaveInvVariance.mData.data());

-            pass = pass && ck::utils::check_err(resultSaveMean.mData, resultSaveMean_ref.mData);
-            pass = pass && ck::utils::check_err(resultSaveInvVariance.mData,
-                                                resultSaveInvVariance_ref.mData);
+            pass = pass && ck::utils::check_err(resultSaveMean, resultSaveMean_ref);
+            pass = pass && ck::utils::check_err(resultSaveInvVariance, resultSaveInvVariance_ref);
         };
     };
```
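Several hunks in this commit drop the explicit `.mData` when calling `ck::utils::check_err`, which suggests the utility gained an overload accepting tensors directly. A minimal sketch of how such an overload could forward to the container comparison; `Tensor` here is a stand-in, and the real `check_err` is considerably richer (type-aware default tolerances, error printing):

```cpp
#include <cmath>
#include <cstddef>
#include <string>
#include <vector>

// Minimal stand-ins illustrating the calling-convention change above; the
// real Tensor and ck::utils::check_err live in CK's utility headers.
template <typename T>
struct Tensor
{
    std::vector<T> mData;
};

namespace ck::utils {
// Container overload: the form the old call sites reached via `.mData`.
template <typename T>
bool check_err(const std::vector<T>& out,
               const std::vector<T>& ref,
               const std::string& msg = "Error: incorrect results",
               double rtol            = 1e-5,
               double atol            = 1e-8)
{
    if(out.size() != ref.size())
        return false;
    for(std::size_t i = 0; i < out.size(); ++i)
        if(std::fabs(double(out[i]) - double(ref[i])) >
           atol + rtol * std::fabs(double(ref[i])))
            return false;
    return true;
}

// Tensor overload: lets callers pass tensors directly, as the new code does.
template <typename T>
bool check_err(const Tensor<T>& out, const Tensor<T>& ref)
{
    return check_err(out.mData, ref.mData);
}
} // namespace ck::utils

int main()
{
    Tensor<float> y{{1.0f, 2.0f}}, y_ref{{1.0f, 2.0f}};
    return ck::utils::check_err(y, y_ref) ? 0 : 1;
}
```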
example/34_batchnorm/batchnorm_infer_nhwc.cpp  (+6 −9)

```diff
@@ -9,6 +9,7 @@
 #include <getopt.h>

 #include "ck/ck.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
@@ -220,14 +221,10 @@ bool bnorm_infer_nhwc_test(bool do_verification,
     std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarLengths;
     std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarStrides;

-    std::copy(inOutLengths.begin(), inOutLengths.end(), i_inOutLengths.begin());
-    std::copy(inOutStrides.begin(), inOutStrides.end(), i_inOutStrides.begin());
-    std::copy(scaleBiasMeanVarLengths.begin(),
-              scaleBiasMeanVarLengths.end(),
-              i_scaleBiasMeanVarLengths.begin());
-    std::copy(scaleBiasMeanVarStrides.begin(),
-              scaleBiasMeanVarStrides.end(),
-              i_scaleBiasMeanVarStrides.begin());
+    ck::ranges::copy(inOutLengths, i_inOutLengths.begin());
+    ck::ranges::copy(inOutStrides, i_inOutStrides.begin());
+    ck::ranges::copy(scaleBiasMeanVarLengths, i_scaleBiasMeanVarLengths.begin());
+    ck::ranges::copy(scaleBiasMeanVarStrides, i_scaleBiasMeanVarStrides.begin());

     int result = 0;
@@ -302,7 +299,7 @@ bool bnorm_infer_nhwc_test(bool do_verification,
         (void)invoker_ptr_ref->Run(argument_ptr_ref.get());

         y_dev.FromDevice(y.mData.data());
-        pass = pass && ck::utils::check_err(y.mData, y_ref.mData);
+        pass = pass && ck::utils::check_err(y, y_ref);
     };

     return (pass);
```
example/35_splitK_gemm/run_splitK_gemm_example.inc  (+7 −10)

```diff
@@ -34,15 +34,15 @@ bool run_splitK_gemm(const ProblemSize& problem_size, const ExecutionConfig& con
     auto f_host_tensor_descriptor =
         [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+            using namespace ck::literals;
+
             if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
             {
-                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                            std::vector<std::size_t>({stride, 1}));
+                return HostTensorDescriptor({row, col}, {stride, 1_uz});
             }
             else
             {
-                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
-                                            std::vector<std::size_t>({1, stride}));
+                return HostTensorDescriptor({row, col}, {1_uz, stride});
             }
         };
@@ -146,15 +146,12 @@ bool run_splitK_gemm(const ProblemSize& problem_size, const ExecutionConfig& con
         if(std::is_same<CDataType, ck::half_t>::value)
         {
-            pass &= ck::utils::check_err(c_m_n_device_result.mData,
-                                         c_m_n_host_result.mData,
-                                         "fp16 incorrect result",
-                                         3e-3,
-                                         1e-3);
+            pass &= ck::utils::check_err(
+                c_m_n_device_result, c_m_n_host_result, "fp16 incorrect result", 3e-3, 1e-3);
         }
         else
         {
-            pass &= ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData);
+            pass &= ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
         }
     }
```
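A plausible reading of the looser fp16 thresholds kept above (the reasoning is ours; only the 3e-3/1e-3 values come from the hunk):

```cpp
#include <cmath>
#include <cstdio>

// fp16 has a 10-bit mantissa, so one ulp near 1.0 is 2^-10 ~= 9.8e-4; split-K
// additionally reorders the K-dimension accumulation, so a few ulps of drift
// against the host reference is expected, and the fp16 branch compares with a
// looser relative/absolute tolerance pair than the default.
int main()
{
    std::printf("fp16 ulp near 1.0: %g\n", std::pow(2.0, -10)); // ~9.77e-4, just under the 1e-3 atol
    return 0;
}
```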
example/36_sparse_embedding/sparse_embedding3_forward_layernorm.cpp  (+3 −6)

```diff
@@ -86,12 +86,10 @@ int main()
     constexpr auto index_length = 2048;
     constexpr AccDataType epsilon = 1e-4;

-    auto f_host_tensor_desc_1d = [](std::size_t len_) {
-        return HostTensorDescriptor(std::vector<std::size_t>({len_}));
-    };
+    auto f_host_tensor_desc_1d = [](std::size_t len_) { return HostTensorDescriptor({len_}); };

     auto f_host_tensor_desc_2d = [](std::size_t rows_, std::size_t cols_) {
-        return HostTensorDescriptor(std::vector<std::size_t>({rows_, cols_}));
+        return HostTensorDescriptor({rows_, cols_});
     };

     using ReferenceInstance =
@@ -203,8 +201,7 @@ int main()
         ref_invoker.Run(ref_argument);

         out_dev.FromDevice(out_from_dev.mData.data());
-        pass &= ck::utils::check_err(
-            out_from_dev.mData, out.mData, "Error: Incorrect results", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(out_from_dev, out, "Error: Incorrect results", 1e-3, 1e-3);
     }

     double total_read = current_dim * index_length * 3 * sizeof(EmbType) +
```
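The simplified lambdas rely on `HostTensorDescriptor` being constructible from a brace-init list of lengths alone. A sketch of a descriptor with that behavior, assuming packed row-major strides are derived when none are given (an illustrative stand-in, not CK's class):

```cpp
#include <cstddef>
#include <vector>

// Stand-in for a lengths-or-lengths-plus-strides descriptor, as the
// simplified lambdas above use it. Packed-stride derivation is an assumption.
struct Descriptor
{
    std::vector<std::size_t> lengths, strides;

    explicit Descriptor(std::vector<std::size_t> lens)
        : lengths(std::move(lens)), strides(lengths.size(), 1)
    {
        // Packed row-major strides: stride[i] = product of lengths[i+1..].
        for(std::size_t i = lengths.size(); i-- > 1;)
            strides[i - 1] = strides[i] * lengths[i];
    }

    Descriptor(std::vector<std::size_t> lens, std::vector<std::size_t> strs)
        : lengths(std::move(lens)), strides(std::move(strs))
    {
    }
};

int main()
{
    Descriptor d1({2048});      // 1-D: strides {1}
    Descriptor d2({2048, 768}); // 2-D: strides {768, 1}
    return d2.strides[0] == 768 ? 0 : 1;
}
```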
example/37_batched_gemm_add_add_relu_gemm_add/batched_gemm_add_add_relu_gemm_add_xdl_fp16.cpp  (+6 −6)

```diff
@@ -19,6 +19,7 @@ Computes C_m_o = Relu(A0[m, k] * B0[n, k] + D00[m, n] + D01[mn]) * B1[n, o] + D1
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
 #include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

 template <ck::index_t... Is>
@@ -314,15 +315,15 @@ int main(int argc, char* argv[])
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
+        using namespace ck::literals;
+
         if(std::is_same<decltype(layout), Row>::value)
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, stride, 1}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
         }
         else
         {
-            return HostTensorDescriptor(std::vector<std::size_t>({batch_count, row, col}),
-                                        std::vector<std::size_t>({batch_stride, 1, stride}));
+            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
         }
     };
@@ -511,8 +512,7 @@ int main(int argc, char* argv[])
             cde1_element_op(e1_g_m_o_host_result(idx), c1_g_m_o(idx), d1_g_m_o(idx));
         });

-        return ck::utils::check_err(e1_g_m_o_device_result.mData, e1_g_m_o_host_result.mData) ? 0
-                                                                                              : 1;
+        return ck::utils::check_err(e1_g_m_o_device_result, e1_g_m_o_host_result) ? 0 : 1;
     }

     return 0;
```
example/38_grouped_conv_bwd_data_multiple_d/common.hpp  (+1 −0)

```diff
@@ -15,6 +15,7 @@
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_data.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
 #include "ck/library/utility/convolution_parameter.hpp"
```