gaoqiong / composable_kernel · Commits

Commit f0224f2a
authored Nov 29, 2022 by letaoqin

Merge branch 'develop' into dl_conv_multiple_d

parents befc2638 0e9c88ce

Changes: 271
Showing 20 changed files with 1084 additions and 243 deletions
example/41_grouped_conv_conv_fwd/grouped_conv_conv_fwd_xdl_int4.cpp                                        +1    -0
example/41_grouped_conv_conv_fwd/grouped_conv_conv_fwd_xdl_int8.cpp                                        +1    -0
example/41_grouped_conv_conv_fwd/run_grouped_conv_conv_fwd_example.inc                                     +7    -11
example/42_groupnorm/groupnorm_sigmoid_fp16.cpp                                                            +4    -4
example/44_conv2d_fwd_quant/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp                        +2    -1
example/44_conv2d_fwd_quant/conv2d_fwd_xdl_perlayer_quantization_int8.cpp                                  +2    -1
example/44_elementwise_permute/elementwise_permute_4D_fp16.cpp                                             +2    -1
example/CMakeLists.txt                                                                                     +2    -0
include/ck/ck.hpp                                                                                          +7    -0
include/ck/tensor_operation/gpu/device/device_batchnorm_backward.hpp                                       +51   -0
include/ck/tensor_operation/gpu/device/device_batchnorm_forward.hpp                                        +27   -4
include/ck/tensor_operation/gpu/device/device_batchnorm_infer.hpp                                          +29   -3
include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_data.hpp                                    +0    -49
include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_data_multiple_d.hpp                         +1    -96
include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_weight.hpp                                  +11   -10
include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd.hpp                                         +21   -22
include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multi_d_xdl.hpp                            +1    -1
include/ck/tensor_operation/gpu/device/impl/device_batchnorm_backward_impl.hpp                             +866  -0
include/ck/tensor_operation/gpu/device/impl/device_batchnorm_forward_impl.hpp                              +9    -2
include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp +40   -38
example/41_grouped_conv_conv_fwd/grouped_conv_conv_fwd_xdl_int4.cpp

@@ -15,6 +15,7 @@
 #include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_gemm_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
example/41_grouped_conv_conv_fwd/grouped_conv_conv_fwd_xdl_int8.cpp

@@ -11,6 +11,7 @@
 #include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_gemm_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"
example/41_grouped_conv_conv_fwd/run_grouped_conv_conv_fwd_example.inc

@@ -97,7 +97,7 @@ bool run_grouped_conv_conv_fwd(bool do_verification,
     std::array<ck::index_t, NDimSpatial> input1_left_pads{};
     std::array<ck::index_t, NDimSpatial> input1_right_pads{};

-    auto copy = [](auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
+    auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

     copy(in0_g_n_c_wis_desc.GetLengths(), a0_g_n_c_wis_lengths);
     copy(in0_g_n_c_wis_desc.GetStrides(), a0_g_n_c_wis_strides);

@@ -120,18 +120,14 @@ bool run_grouped_conv_conv_fwd(bool do_verification,
     const ck::index_t gemm_batch = a0_g_n_c_wis_lengths[0];
     const ck::index_t gemm0_m_length =
-        e1_g_n_k_wos_lengths[1] * std::accumulate(e1_g_n_k_wos_lengths.begin() + 3,
-                                                  e1_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
-                                                  ck::index_t{1},
-                                                  std::multiplies<ck::index_t>{});
+        e1_g_n_k_wos_lengths[1] *
+        ck::accumulate_n<ck::index_t>(
+            e1_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>{});

     const ck::index_t gemm0_n_length = b0_g_k_c_xs_lengths[1];
-    const ck::index_t gemm0_k_length = std::accumulate(b0_g_k_c_xs_lengths.begin() + 2,
-                                                       b0_g_k_c_xs_lengths.begin() + 2 + NDimSpatial + 1,
-                                                       ck::index_t{1},
-                                                       std::multiplies<ck::index_t>{});
+    const ck::index_t gemm0_k_length = ck::accumulate_n<ck::index_t>(
+        b0_g_k_c_xs_lengths.begin() + 2, NDimSpatial + 1, 1, std::multiplies<>{});

     const ck::index_t gemm1_n_length = b1_g_k_c_xs_lengths[1];

@@ -261,7 +257,7 @@ bool run_grouped_conv_conv_fwd(bool do_verification,
 #endif
-        return ck::utils::check_err(out1_device.mData,
-                                    out1_host.mData,
-                                    "Error: incorrect results!",
-                                    1e-5f,
-                                    1e-4f);
+        return ck::utils::check_err(
+            out1_device, out1_host, "Error: incorrect results!", 1e-5f, 1e-4f);
     }

     return true;
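The net effect of the second hunk is that std::accumulate over an explicit [first, first + n) range is replaced by ck::accumulate_n, which takes the element count directly. Below is a minimal stand-alone sketch of the equivalence, assuming ck::accumulate_n<T>(first, n, init, op) folds exactly the n elements starting at first, as the replacement lines above suggest; the local accumulate_n helper is illustrative, not the CK implementation.

#include <array>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>

using index_t = std::int32_t; // stand-in for ck::index_t

// Illustrative helper mirroring the ck::accumulate_n call shape used above.
template <typename T, typename It, typename Op>
T accumulate_n(It first, index_t n, T init, Op op)
{
    return std::accumulate(first, first + n, init, op);
}

int main()
{
    constexpr index_t NDimSpatial = 2;
    // e1_g_n_k_wos_lengths = {G, N, K, Ho, Wo}
    std::array<index_t, NDimSpatial + 3> e1_g_n_k_wos_lengths{2, 8, 64, 28, 28};

    // old form: N * product of the NDimSpatial output sizes, via an iterator pair
    const index_t m_old = e1_g_n_k_wos_lengths[1] *
                          std::accumulate(e1_g_n_k_wos_lengths.begin() + 3,
                                          e1_g_n_k_wos_lengths.begin() + 3 + NDimSpatial,
                                          index_t{1},
                                          std::multiplies<index_t>{});

    // new form: same value, expressed with the element count instead of a second iterator
    const index_t m_new = e1_g_n_k_wos_lengths[1] *
                          accumulate_n<index_t>(
                              e1_g_n_k_wos_lengths.begin() + 3, NDimSpatial, 1, std::multiplies<>{});

    std::cout << m_old << " == " << m_new << '\n'; // both print 6272
}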
example/42_groupnorm/groupnorm_sigmoid_fp16.cpp

@@ -100,9 +100,9 @@ int main(int argc, char* argv[])
     Tensor<GammaDataType> gamma({G, C});
     Tensor<BetaDataType> beta({G, C});

-    ck::utils::FillUniformDistribution<XDataType>{0.f, 1.f}(x.begin(), x.end());
-    ck::utils::FillUniformDistribution<GammaDataType>{0.f, 1.f}(gamma.begin(), gamma.end());
-    ck::utils::FillUniformDistribution<BetaDataType>{0.f, 1.f}(beta.begin(), beta.end());
+    ck::utils::FillUniformDistribution<XDataType>{0.f, 1.f}(x);
+    ck::utils::FillUniformDistribution<GammaDataType>{0.f, 1.f}(gamma);
+    ck::utils::FillUniformDistribution<BetaDataType>{0.f, 1.f}(beta);

     DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
     DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());

@@ -167,7 +167,7 @@ int main(int argc, char* argv[])
         ref_invoker.Run(ref_argument);

         y_dev.FromDevice(y.mData.data());
-        pass &= ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 1e-3, 1e-3);
+        pass &= ck::utils::check_err(y, host_y, "Error: Incorrect results", 1e-3, 1e-3);
     }

     return (pass ? 0 : 1);
example/44_conv2d_fwd_quant/conv2d_fwd_xdl_bias_relu_perlayer_quantization_int8.cpp

@@ -6,6 +6,7 @@
 #include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"

@@ -144,7 +145,7 @@ bool run_grouped_conv_fwd(bool do_verification,
     std::array<ck::index_t, NDimSpatial> input_left_pads{};
     std::array<ck::index_t, NDimSpatial> input_right_pads{};

-    auto copy = [](auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
+    auto copy = [](auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

     copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
     copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
example/44_conv2d_fwd_quant/conv2d_fwd_xdl_perlayer_quantization_int8.cpp

@@ -6,6 +6,7 @@
 #include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
 #include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"

@@ -131,7 +132,7 @@ bool run_grouped_conv_fwd(bool do_verification,
     std::array<ck::index_t, NDimSpatial> input_left_pads{};
     std::array<ck::index_t, NDimSpatial> input_right_pads{};

-    auto copy = [](auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };
+    auto copy = [](auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

     copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
     copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
example/44_elementwise_permute/elementwise_permute_4D_fp16.cpp

@@ -5,6 +5,7 @@
 #include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
 #include "ck/tensor_operation/gpu/device/impl/device_elementwise.hpp"
+#include "ck/library/utility/algorithm.hpp"
 #include "ck/library/utility/check_err.hpp"
 #include "ck/library/utility/device_memory.hpp"
 #include "ck/library/utility/host_tensor.hpp"

@@ -69,7 +70,7 @@ int main()
                            static_cast<int>(nhwc[2] * nhwc[3]),
                            static_cast<int>(nhwc[3])};
-    std::copy(nchw.begin(), nchw.end(), ab_lengths.begin());
+    ck::ranges::copy(nchw, ab_lengths.begin());

     auto broadcastPermute = DeviceElementwisePermuteInstance{};
     auto argument = broadcastPermute.MakeArgumentPointer(
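Several of the example changes above replace the iterator-pair std::copy with ck::ranges::copy, which takes the source container as a whole. A small sketch of the call-shape difference, assuming ck::ranges::copy(range, out) copies the entire source range like std::copy over its begin/end; the local ranges_copy helper is illustrative only, not the CK implementation.

#include <algorithm>
#include <array>
#include <cstdint>
#include <vector>

using index_t = std::int32_t; // stand-in for ck::index_t

// Illustrative helper mirroring the ck::ranges::copy call shape used in the diffs.
template <typename Range, typename OutIt>
OutIt ranges_copy(const Range& src, OutIt dst)
{
    return std::copy(std::begin(src), std::end(src), dst);
}

int main()
{
    std::vector<index_t> nchw{4, 16, 32, 32};
    std::array<index_t, 4> ab_lengths{};

    std::copy(nchw.begin(), nchw.end(), ab_lengths.begin()); // old call shape
    ranges_copy(nchw, ab_lengths.begin());                   // new call shape
}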
example/CMakeLists.txt

@@ -12,6 +12,7 @@ function(add_example_executable EXAMPLE_NAME FILE_NAME)
     add_test(NAME ${EXAMPLE_NAME} COMMAND $<TARGET_FILE:${EXAMPLE_NAME}> ${ARGN})
     add_dependencies(examples ${EXAMPLE_NAME})
     add_dependencies(check ${EXAMPLE_NAME})
+    rocm_install(TARGETS ${EXAMPLE_NAME} COMPONENT examples)
 endfunction(add_example_executable EXAMPLE_NAME)

 function(add_example_executable_no_testing EXAMPLE_NAME FILE_NAME)

@@ -19,6 +20,7 @@ function(add_example_executable_no_testing EXAMPLE_NAME FILE_NAME)
     add_executable(${EXAMPLE_NAME} ${FILE_NAME})
     target_link_libraries(${EXAMPLE_NAME} PRIVATE utility)
     add_dependencies(examples ${EXAMPLE_NAME})
+    rocm_install(TARGETS ${EXAMPLE_NAME} COMPONENT examples)
 endfunction(add_example_executable_no_testing EXAMPLE_NAME)

 # add all example subdir
include/ck/ck.hpp

@@ -154,6 +154,13 @@
 // tuning parameter
 #define CK_WORKAROUND_SWDEV_325164 0

+// workaround: a BF16 attention kernel for gfx908 is likely affected by a compiler issue
+#ifdef __gfx908__
+#define CK_WORKAROUND_SWDEV_XXXXXX_BF16_ATTEN_FWD_GFX908_ISSUE 1
+#else // __gfx90a__, ...
+#define CK_WORKAROUND_SWDEV_XXXXXX_BF16_ATTEN_FWD_GFX908_ISSUE 0
+#endif // __gfx908__
+
 namespace ck {

 enum struct InMemoryDataOperationEnum
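The new block is a standard compile-time device guard: the macro is 1 only when the translation unit is compiled for gfx908 and 0 otherwise, so an affected kernel path can be fenced off without touching other targets. A hypothetical usage sketch follows; the macro name comes from the hunk above, but the two code paths and functions are stand-ins for illustration, not CK code.

// Define a host-side fallback so this sketch also compiles outside the CK build,
// where the guard would normally come from ck/ck.hpp.
#ifndef CK_WORKAROUND_SWDEV_XXXXXX_BF16_ATTEN_FWD_GFX908_ISSUE
#define CK_WORKAROUND_SWDEV_XXXXXX_BF16_ATTEN_FWD_GFX908_ISSUE 0
#endif

#include <cstdio>

void run_bf16_attention_forward()
{
#if CK_WORKAROUND_SWDEV_XXXXXX_BF16_ATTEN_FWD_GFX908_ISSUE
    std::puts("gfx908: taking the workaround path"); // avoid the kernel hit by the compiler issue
#else
    std::puts("gfx90a and newer: taking the regular BF16 attention path");
#endif
}

int main() { run_bf16_attention_forward(); }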
include/ck/tensor_operation/gpu/device/device_batchnorm_backward.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <array>
#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_base.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

template <index_t Rank, index_t NumBatchNormReduceDim, typename DyElementwiseOp>
struct DeviceBatchNormBwd : public BaseOperator
{
    static constexpr index_t NumInvariantDim = Rank - NumBatchNormReduceDim;

    virtual std::unique_ptr<BaseArgument>
    MakeArgumentPointer(const std::array<index_t, Rank> xyLengths,
                        const std::array<index_t, Rank> xStrides,
                        const std::array<index_t, Rank> dyStrides,
                        const std::array<index_t, Rank> dxStrides,
                        const std::array<int, NumBatchNormReduceDim> reduceDims,
                        const std::array<ck::index_t, NumInvariantDim> bnScaleBiasMeanVarLengths,
                        const std::array<ck::index_t, NumInvariantDim> bnScaleStrides,
                        const std::array<ck::index_t, NumInvariantDim> bnBiasStrides,
                        const std::array<ck::index_t, NumInvariantDim> bnMeanVarStrides,
                        const void* p_x,
                        const void* p_dy,
                        const void* p_scale,
                        const void* p_savedMean,
                        const void* p_savedInvVar,
                        double epsilon,
                        const DyElementwiseOp dy_elementwise_op,
                        void* p_dx,
                        void* p_dscale,
                        void* p_dbias) = 0;

    virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};

template <index_t Rank, index_t NumBatchNormReduceDim, typename DyElementwiseOp>
using DeviceBatchNormBwdPtr =
    std::unique_ptr<DeviceBatchNormBwd<Rank, NumBatchNormReduceDim, DyElementwiseOp>>;

} // namespace device
} // namespace tensor_operation
} // namespace ck
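This header only declares the type-erased interface; the matching implementation (DeviceBatchNormBwdImpl) is added later in this commit. The following sketch shows the call sequence a client would go through against this interface, assuming a concrete instance is already at hand; the NHWC-style shapes, reduce dimensions {0, 1, 2} and the PassThrough element-wise op are illustrative choices only, and IsSupportedArgument is inherited from BaseOperator.

#include <array>
#include "ck/tensor_operation/gpu/device/device_batchnorm_backward.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
// Rank-4 NHWC tensor; reducing over N, H, W (3 reduce dims) leaves C invariant.
using BnormBwd = ck::tensor_operation::device::DeviceBatchNormBwd<4, 3, PassThrough>;

float run_batchnorm_bwd(BnormBwd& op,
                        const void* p_x, const void* p_dy, const void* p_scale,
                        const void* p_saved_mean, const void* p_saved_inv_var,
                        void* p_dx, void* p_dscale, void* p_dbias)
{
    std::array<ck::index_t, 4> lengths{128, 16, 16, 256}; // N, H, W, C
    std::array<ck::index_t, 4> strides{16 * 16 * 256, 16 * 256, 256, 1};
    std::array<int, 3> reduce_dims{0, 1, 2};
    std::array<ck::index_t, 1> c_len{256}, c_stride{1};

    // epsilon precedes the element-wise op in the interface above
    auto arg = op.MakeArgumentPointer(lengths, strides, strides, strides, reduce_dims,
                                      c_len, c_stride, c_stride, c_stride,
                                      p_x, p_dy, p_scale, p_saved_mean, p_saved_inv_var,
                                      1e-5, PassThrough{}, p_dx, p_dscale, p_dbias);

    if(!op.IsSupportedArgument(arg.get()))
        return -1.f;

    return op.MakeInvokerPointer()->Run(arg.get());
}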
include/ck/tensor_operation/gpu/device/device_batchnorm_forward.hpp

@@ -13,7 +13,15 @@ namespace ck {
 namespace tensor_operation {
 namespace device {

-template <index_t Rank, index_t NumBatchNormReduceDim, typename YElementwiseOp>
+template <typename XDataType,
+          typename YDataType,
+          typename AccDataType,
+          typename ScaleDataType,
+          typename BiasDataType,
+          typename MeanVarDataType,
+          typename YElementwiseOp,
+          index_t Rank,
+          index_t NumBatchNormReduceDim>
 struct DeviceBatchNormFwd : public BaseOperator
 {
     virtual std::unique_ptr<BaseArgument> MakeArgumentPointer(

@@ -40,9 +48,24 @@ struct DeviceBatchNormFwd : public BaseOperator
     virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
 };

-template <index_t Rank, index_t NumBatchNormReduceDim, typename YElementwiseOp>
-using DeviceBatchNormFwdPtr =
-    std::unique_ptr<DeviceBatchNormFwd<Rank, NumBatchNormReduceDim, YElementwiseOp>>;
+template <typename XDataType,
+          typename YDataType,
+          typename AccDataType,
+          typename ScaleDataType,
+          typename BiasDataType,
+          typename MeanVarDataType,
+          typename YElementwiseOp,
+          index_t Rank,
+          index_t NumBatchNormReduceDim>
+using DeviceBatchNormFwdPtr = std::unique_ptr<DeviceBatchNormFwd<XDataType,
+                                                                 YDataType,
+                                                                 AccDataType,
+                                                                 ScaleDataType,
+                                                                 BiasDataType,
+                                                                 MeanVarDataType,
+                                                                 YElementwiseOp,
+                                                                 Rank,
+                                                                 NumBatchNormReduceDim>>;

 } // namespace device
 } // namespace tensor_operation
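After this change the tensor data types are part of the type-erased pointer alias as well, so a use site has to spell them out. A minimal sketch of what such a declaration looks like with the widened alias; the all-float type choices, Rank 4 and 3 reduce dimensions are illustrative only.

#include "ck/tensor_operation/gpu/device/device_batchnorm_forward.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

// before: DeviceBatchNormFwdPtr<Rank, NumBatchNormReduceDim, YElementwiseOp>
// after : the data types are carried by the alias as well
using BnormFwdPtr = ck::tensor_operation::device::DeviceBatchNormFwdPtr<float,       // XDataType
                                                                        float,       // YDataType
                                                                        float,       // AccDataType
                                                                        float,       // ScaleDataType
                                                                        float,       // BiasDataType
                                                                        float,       // MeanVarDataType
                                                                        PassThrough, // YElementwiseOp
                                                                        4,           // Rank
                                                                        3>;          // NumBatchNormReduceDim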
include/ck/tensor_operation/gpu/device/device_batchnorm_infer.hpp

@@ -13,13 +13,22 @@ namespace ck {
 namespace tensor_operation {
 namespace device {

-template <index_t Rank, index_t NumBatchNormReduceDim>
+template <typename XDataType,
+          typename YDataType,
+          typename AccDataType,
+          typename ScaleDataType,
+          typename BiasDataType,
+          typename MeanVarDataType,
+          typename YElementwiseOp,
+          index_t Rank,
+          index_t NumBatchNormReduceDim>
 struct DeviceBatchNormInfer : public BaseOperator
 {
     virtual std::unique_ptr<BaseArgument>
     MakeArgumentPointer(const std::array<index_t, Rank> xyLengths,
                         const std::array<index_t, Rank> xStrides,
                         const std::array<index_t, Rank> yStrides,
                         const std::array<int, NumBatchNormReduceDim> reduceDims,
                         const std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarLengths,
                         const std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleStrides,
                         const std::array<index_t, Rank - NumBatchNormReduceDim> bnBiasStrides,

@@ -28,6 +37,7 @@ struct DeviceBatchNormInfer : public BaseOperator
                         const void* bnScale,
                         const void* bnBias,
                         double epsilon,
+                        const YElementwiseOp y_elementwise_op,
                         const void* estimatedMean,
                         const void* estimatedInvVariance,
                         void* p_y) = 0;

@@ -35,8 +45,24 @@ struct DeviceBatchNormInfer : public BaseOperator
     virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
 };

-template <index_t Rank, index_t NumBatchNormReduceDim>
-using DeviceBatchNormInferPtr =
-    std::unique_ptr<DeviceBatchNormInfer<Rank, NumBatchNormReduceDim>>;
+template <typename XDataType,
+          typename YDataType,
+          typename AccDataType,
+          typename ScaleDataType,
+          typename BiasDataType,
+          typename MeanVarDataType,
+          typename YElementwiseOp,
+          index_t Rank,
+          index_t NumBatchNormReduceDim>
+using DeviceBatchNormInferPtr = std::unique_ptr<DeviceBatchNormInfer<XDataType,
+                                                                     YDataType,
+                                                                     AccDataType,
+                                                                     ScaleDataType,
+                                                                     BiasDataType,
+                                                                     MeanVarDataType,
+                                                                     YElementwiseOp,
+                                                                     Rank,
+                                                                     NumBatchNormReduceDim>>;

 } // namespace device
 } // namespace tensor_operation
include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_data.hpp (deleted, 100644 → 0)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <array>

#include "ck/tensor_operation/gpu/device/device_base.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

template <ck::index_t NDimSpatial,
          typename InputLayout,
          typename WeightLayout,
          typename OutputLayout,
          typename InputDataType,
          typename WeightDataType,
          typename OutputDataType,
          typename InputElementwiseOperation,
          typename WeightElementwiseOperation,
          typename OutputElementwiseOperation>
struct DeviceGroupedConvBwdData : public BaseOperator
{
    virtual std::unique_ptr<BaseArgument>
    MakeArgumentPointer(void* p_input,
                        const void* p_weight,
                        const void* p_output,
                        const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_lengths,
                        const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
                        const std::array<index_t, NDimSpatial + 3>& weight_g_k_c_xs_lengths,
                        const std::array<index_t, NDimSpatial + 3>& weight_g_k_c_xs_strides,
                        const std::array<index_t, NDimSpatial + 3>& output_g_n_k_wos_lengths,
                        const std::array<index_t, NDimSpatial + 3>& output_g_n_k_wos_strides,
                        const std::array<index_t, NDimSpatial>& conv_filter_strides,
                        const std::array<index_t, NDimSpatial>& conv_filter_dilations,
                        const std::array<index_t, NDimSpatial>& input_left_pads,
                        const std::array<index_t, NDimSpatial>& input_right_pads,
                        const InputElementwiseOperation& input_element_op,
                        const WeightElementwiseOperation& weight_element_op,
                        const OutputElementwiseOperation& output_element_op) = 0;

    virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};

} // namespace device
} // namespace tensor_operation
} // namespace ck
include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_data_multiple_d.hpp

@@ -3,10 +3,9 @@
 #pragma once

-#include <vector>
+#include <array>

 #include "ck/tensor_operation/gpu/device/device_base.hpp"
-#include "ck/tensor_operation/gpu/device/device_grouped_conv_bwd_data.hpp"

 namespace ck {
 namespace tensor_operation {

@@ -63,100 +62,6 @@ struct DeviceGroupedConvBwdDataMultipleD : public BaseOperator
     virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
 };

-template <ck::index_t NDimSpatial,
-          typename ALayout, typename BLayout, typename ELayout,
-          typename ADataType, typename BDataType, typename EDataType,
-          typename AElementwiseOperation,
-          typename BElementwiseOperation,
-          typename CDEElementwiseOperation>
-struct DeviceGroupedConvBwdDataMultipleD<NDimSpatial,
-                                         ALayout, BLayout, Tuple<>, ELayout,
-                                         ADataType, BDataType, Tuple<>, EDataType,
-                                         AElementwiseOperation,
-                                         BElementwiseOperation,
-                                         CDEElementwiseOperation>
-    : public DeviceGroupedConvBwdData<NDimSpatial,
-                                      ELayout, BLayout, ALayout,
-                                      EDataType, BDataType, ADataType,
-                                      CDEElementwiseOperation,
-                                      BElementwiseOperation,
-                                      AElementwiseOperation>
-{
-    virtual std::unique_ptr<BaseArgument>
-    MakeArgumentPointer(const void* p_a,                   // output image
-                        const void* p_b,                   // weight
-                        const std::array<const void*, 0>&, // bias
-                        void* p_e,                         // input image
-                        const std::array<index_t, NDimSpatial + 3>& a_g_n_k_wos_lengths, // output image
-                        const std::array<index_t, NDimSpatial + 3>& a_g_n_k_wos_strides, // output image
-                        const std::array<index_t, NDimSpatial + 3>& b_g_k_c_xs_lengths,  // weight
-                        const std::array<index_t, NDimSpatial + 3>& b_g_k_c_xs_strides,  // weight
-                        const std::array<std::array<index_t, NDimSpatial + 3>, 0>&,      // bias
-                        const std::array<std::array<index_t, NDimSpatial + 3>, 0>&,      // bias
-                        const std::array<index_t, NDimSpatial + 3>& e_g_n_c_wis_lengths, // input image
-                        const std::array<index_t, NDimSpatial + 3>& e_g_n_c_wis_strides, // input image
-                        const std::array<index_t, NDimSpatial>& conv_filter_strides,
-                        const std::array<index_t, NDimSpatial>& conv_filter_dilations,
-                        const std::array<index_t, NDimSpatial>& input_left_pads,
-                        const std::array<index_t, NDimSpatial>& input_right_pads,
-                        const AElementwiseOperation& a_element_op,
-                        const BElementwiseOperation& b_element_op,
-                        const CDEElementwiseOperation& cde_element_op) = 0;
-
-    std::unique_ptr<BaseArgument>
-    MakeArgumentPointer(void* p_input,
-                        const void* p_weight,
-                        const void* p_output,
-                        const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& input_g_n_c_wis_strides,
-                        const std::array<index_t, NDimSpatial + 3>& weight_g_k_c_xs_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& weight_g_k_c_xs_strides,
-                        const std::array<index_t, NDimSpatial + 3>& output_g_n_k_wos_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& output_g_n_k_wos_strides,
-                        const std::array<index_t, NDimSpatial>& conv_filter_strides,
-                        const std::array<index_t, NDimSpatial>& conv_filter_dilations,
-                        const std::array<index_t, NDimSpatial>& input_left_pads,
-                        const std::array<index_t, NDimSpatial>& input_right_pads,
-                        const CDEElementwiseOperation& input_element_op,
-                        const BElementwiseOperation& weight_element_op,
-                        const AElementwiseOperation& output_element_op) override final
-    {
-        return MakeArgumentPointer(p_output,
-                                   p_weight,
-                                   std::array<const void*, 0>{},
-                                   p_input,
-                                   output_g_n_k_wos_lengths,
-                                   output_g_n_k_wos_strides,
-                                   weight_g_k_c_xs_lengths,
-                                   weight_g_k_c_xs_strides,
-                                   std::array<std::array<index_t, NDimSpatial + 3>, 0>{},
-                                   std::array<std::array<index_t, NDimSpatial + 3>, 0>{},
-                                   input_g_n_c_wis_lengths,
-                                   input_g_n_c_wis_strides,
-                                   conv_filter_strides,
-                                   conv_filter_dilations,
-                                   input_left_pads,
-                                   input_right_pads,
-                                   output_element_op,
-                                   weight_element_op,
-                                   input_element_op);
-    }
-};
-
 } // namespace device
 } // namespace tensor_operation
 } // namespace ck
include/ck/tensor_operation/gpu/device/device_conv_bwd_weight.hpp → include/ck/tensor_operation/gpu/device/device_grouped_conv_bwd_weight.hpp

@@ -3,7 +3,7 @@
 #pragma once

-#include <vector>
+#include <array>

 #include "ck/tensor_operation/gpu/device/device_base.hpp"

@@ -11,7 +11,7 @@ namespace ck {
 namespace tensor_operation {
 namespace device {

-template <ck::index_t NumDimSpatial,
+template <ck::index_t NDimSpatial,
           typename InLayout,
           typename WeiLayout,
           typename OutLayout,

@@ -21,22 +21,23 @@ template <ck::index_t NumDimSpatial,
           typename InElementwiseOperation,
           typename WeiElementwiseOperation,
           typename OutElementwiseOperation>
-struct DeviceConvBwdWeight : public BaseOperator
+struct DeviceGroupedConvBwdWeight : public BaseOperator
 {
     virtual std::unique_ptr<BaseArgument>
     MakeArgumentPointer(const void* p_in,
                         void* p_wei,
                         const void* p_out,
                         ck::index_t G,
                         ck::index_t N,
                         ck::index_t K,
                         ck::index_t C,
-                        std::vector<ck::index_t> input_spatial_lengths,
-                        std::vector<ck::index_t> filter_spatial_lengths,
-                        std::vector<ck::index_t> output_spatial_lengths,
-                        std::vector<ck::index_t> conv_filter_strides,
-                        std::vector<ck::index_t> conv_filter_dilations,
-                        std::vector<ck::index_t> input_left_pads,
-                        std::vector<ck::index_t> input_right_pads,
+                        std::array<ck::index_t, NDimSpatial> input_spatial_lengths,
+                        std::array<ck::index_t, NDimSpatial> filter_spatial_lengths,
+                        std::array<ck::index_t, NDimSpatial> output_spatial_lengths,
+                        std::array<ck::index_t, NDimSpatial> conv_filter_strides,
+                        std::array<ck::index_t, NDimSpatial> conv_filter_dilations,
+                        std::array<ck::index_t, NDimSpatial> input_left_pads,
+                        std::array<ck::index_t, NDimSpatial> input_right_pads,
                         InElementwiseOperation in_element_op,
                         WeiElementwiseOperation wei_element_op,
                         OutElementwiseOperation out_element_op,
include/ck/tensor_operation/gpu/device/device_grouped_conv_fwd.hpp

@@ -14,39 +14,38 @@ namespace device {
 // Convolution Forward:
 //   input : input image A[G, N, C, Hi, Wi],
 //   input : weight B[G, K, C, Y, X],
 //   input : D0[G, N, K, Ho, Wo], D1[G, N, K, Ho, Wo], ...
 //   output : output image E[G, N, K, Ho, Wo]
 //   C = a_op(A) * b_op(B)
 //   E = cde_op(C, D0, D1, ...)
 template <index_t NDimSpatial,
-          typename ALayout,
-          typename BLayout,
-          typename CLayout,
-          typename ADataType,
-          typename BDataType,
-          typename CDataType,
-          typename AElementwiseOperation,
-          typename BElementwiseOperation,
-          typename CElementwiseOperation>
+          typename InLayout,
+          typename WeiLayout,
+          typename OutLayout,
+          typename InDataType,
+          typename WeiDataType,
+          typename OutDataType,
+          typename InElementwiseOperation,
+          typename WeiElementwiseOperation,
+          typename OutElementwiseOperation>
 struct DeviceGroupedConvFwd : public BaseOperator
 {
     virtual std::unique_ptr<BaseArgument>
-    MakeArgumentPointer(const void* p_a, // input image
-                        const void* p_b, // weight
-                        void* p_c,       // output image
-                        const std::array<index_t, NDimSpatial + 3>& a_g_n_c_wis_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& a_g_n_c_wis_strides,
-                        const std::array<index_t, NDimSpatial + 3>& b_g_k_c_xs_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& b_g_k_c_xs_strides,
-                        const std::array<index_t, NDimSpatial + 3>& c_g_n_k_wos_lengths,
-                        const std::array<index_t, NDimSpatial + 3>& c_g_n_k_wos_strides,
+    MakeArgumentPointer(const void* p_in,  // input image
+                        const void* p_wei, // weight
+                        void* p_out,       // output image
+                        const std::array<index_t, NDimSpatial + 3>& in_g_n_c_wis_lengths,
+                        const std::array<index_t, NDimSpatial + 3>& in_g_n_c_wis_strides,
+                        const std::array<index_t, NDimSpatial + 3>& wei_g_k_c_xs_lengths,
+                        const std::array<index_t, NDimSpatial + 3>& wei_g_k_c_xs_strides,
+                        const std::array<index_t, NDimSpatial + 3>& out_g_n_k_wos_lengths,
+                        const std::array<index_t, NDimSpatial + 3>& out_g_n_k_wos_strides,
                         const std::array<index_t, NDimSpatial>& conv_filter_strides,
                         const std::array<index_t, NDimSpatial>& conv_filter_dilations,
                         const std::array<index_t, NDimSpatial>& input_left_pads,
                         const std::array<index_t, NDimSpatial>& input_right_pads,
-                        const AElementwiseOperation& a_element_op,
-                        const BElementwiseOperation& b_element_op,
-                        const CElementwiseOperation& c_element_op) = 0;
+                        const InElementwiseOperation& in_element_op,
+                        const WeiElementwiseOperation& wei_element_op,
+                        const OutElementwiseOperation& out_element_op) = 0;

     virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
 };
include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multi_d_xdl.hpp

@@ -700,7 +700,7 @@ struct DeviceBatchedGemmMultiD_Xdl : public DeviceBatchedGemmMultiD<ALayout,
             << BlockSize << ", "
             << MPerBlock << ", "
             << NPerBlock << ", "
-            << KPerBlock
+            << KPerBlock << ", "
             << AK1 << ", "
             << BK1 << ", "
             << getGemmSpecializationString(GemmSpec)
include/ck/tensor_operation/gpu/device/impl/device_batchnorm_backward_impl.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iostream>
#include <sstream>

#include "ck/utility/reduction_operator.hpp"
#include "ck/tensor_operation/gpu/device/device_batchnorm_backward.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_reduce_common.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_batchnorm_backward_blockwise_welford.hpp"
#include "ck/tensor_operation/gpu/grid/batchnorm_multiblock/gridwise_multiblock_welford_first_half.hpp"
#include "ck/tensor_operation/gpu/grid/batchnorm_multiblock/gridwise_multiblock_welford_second_half_multiblock_reduce_first_half.hpp"
#include "ck/tensor_operation/gpu/grid/batchnorm_multiblock/gridwise_multiblock_reduce_second_half_batchnorm_backward_final.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/welford_helper.hpp"
#include "ck/host_utility/device_prop.hpp"
#include "ck/host_utility/kernel_launch.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

template <typename XDataType, typename DxDataType, typename DyDataType, typename AccDataType,
          typename ScaleDataType, typename BiasDataType, typename MeanVarDataType,
          typename DyElementwiseOp,
          index_t Rank, index_t NumBatchNormReduceDim, bool UseMultiblockInK,
          index_t BlockSize,
          index_t MThreadClusterSize, index_t KThreadClusterSize,
          index_t MThreadSliceSize, index_t KThreadSliceSize,
          index_t XDyDxVectorDim,
          index_t XSrcVectorSize, index_t DySrcVectorSize, index_t DxDstVectorSize,
          index_t ScaleSrcDstVectorSize, index_t BiasDstVectorSize, index_t MeanVarSrcVectorSize>
struct DeviceBatchNormBwdImpl
    : public DeviceBatchNormBwd<Rank, NumBatchNormReduceDim, DyElementwiseOp>
{
    static_assert(Rank <= 6, "Bigger Rank size is not supported!");
    static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize,
                  "Invalid thread cluster size assignments!");

    static_assert((XDyDxVectorDim == 0 && MThreadSliceSize % XSrcVectorSize == 0 &&
                   MThreadSliceSize % DySrcVectorSize == 0 &&
                   MThreadSliceSize % DxDstVectorSize == 0) ||
                      (XDyDxVectorDim == 1 && KThreadSliceSize % XSrcVectorSize == 0 &&
                       KThreadSliceSize % DySrcVectorSize == 0 &&
                       KThreadSliceSize % DxDstVectorSize == 0),
                  "Invalid thread slice sizes and/or vector sizes configuration, please check!");

    static constexpr index_t NumInvariantDim = Rank - NumBatchNormReduceDim;

    static constexpr index_t M_BlockTileSize = MThreadClusterSize * MThreadSliceSize;
    static constexpr index_t K_BlockTileSize = KThreadClusterSize * KThreadSliceSize;

    static auto MakeXY2dDescriptor(const std::array<index_t, Rank>& xyLengths,
                                   const std::array<index_t, Rank>& xyStrides,
                                   int blkGroupSize,
                                   int numBlockTileIteration)
    {
        const auto tupleXYLengths =
            generate_tuple([&](auto I) { return xyLengths[I]; }, Number<Rank>{});
        const auto tupleXYStrides =
            generate_tuple([&](auto I) { return xyStrides[I]; }, Number<Rank>{});

        const auto raw_grid_desc = make_naive_tensor_descriptor(tupleXYLengths, tupleXYStrides);

        const auto grid_desc_m_k = [&]() {
            using InvariantDims = typename arithmetic_sequence_gen<0, NumInvariantDim, 1>::type;
            using ReduceDims    = typename arithmetic_sequence_gen<NumInvariantDim, Rank, 1>::type;

            const auto reduceDimLengths = generate_tuple(
                [&](auto I) { return xyLengths[NumInvariantDim + I]; },
                Number<NumBatchNormReduceDim>{});
            const auto invariantDimLengths =
                generate_tuple([&](auto I) { return xyLengths[I]; }, Number<NumInvariantDim>{});

            return transform_tensor_descriptor(
                raw_grid_desc,
                make_tuple(make_merge_transform(invariantDimLengths),
                           make_merge_transform(reduceDimLengths)),
                make_tuple(InvariantDims{}, ReduceDims{}),
                make_tuple(Sequence<0>{}, Sequence<1>{}));
        }();

        const auto invariantLength = grid_desc_m_k.GetLength(Number<0>{});
        const auto reduceLength    = grid_desc_m_k.GetLength(Number<1>{});

        const int workSizePerBlock = K_BlockTileSize * numBlockTileIteration;
        const auto mPad =
            math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;
        const auto kPad = workSizePerBlock * blkGroupSize - reduceLength;

        auto grid_desc_m_k_padded = transform_tensor_descriptor(
            grid_desc_m_k,
            make_tuple(make_right_pad_transform(invariantLength, mPad),
                       make_right_pad_transform(reduceLength, kPad)),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0>{}, Sequence<1>{}));

        return (grid_desc_m_k_padded);
    };

    static auto MakeMultiblockFirstReduceOutputMG2dDescriptor(int invariantLength, int blkGroupSize)
    {
        const auto grid_desc_m_g =
            make_naive_tensor_descriptor_packed(make_tuple(invariantLength, blkGroupSize));

        const auto mPad =
            math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;

        auto grid_desc_m_g_padded = transform_tensor_descriptor(
            grid_desc_m_g,
            make_tuple(make_right_pad_transform(invariantLength, mPad),
                       make_pass_through_transform(blkGroupSize)),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0>{}, Sequence<1>{}));

        return (grid_desc_m_g_padded);
    };

    static auto MakeMultiblockFinalReduceInputMK2dDescriptor(int invariantLength, int blkGroupSize)
    {
        const auto reduceLength = blkGroupSize;
        const auto grid_desc_m_k =
            make_naive_tensor_descriptor_packed(make_tuple(invariantLength, reduceLength));

        const auto mPad =
            math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;
        const auto kPad =
            math::integer_least_multiple(reduceLength, KThreadClusterSize) - reduceLength;

        auto grid_desc_m_k_padded = transform_tensor_descriptor(
            grid_desc_m_k,
            make_tuple(make_right_pad_transform(invariantLength, mPad),
                       make_right_pad_transform(reduceLength, kPad)),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0>{}, Sequence<1>{}));

        return (grid_desc_m_k_padded);
    };

    static auto MakeScaleBiasMeanVar1dDescriptor(const std::array<index_t, NumInvariantDim>& lengths,
                                                 const std::array<index_t, NumInvariantDim>& strides)
    {
        const auto tupleLengths =
            generate_tuple([&](auto I) { return lengths[I]; }, Number<NumInvariantDim>{});
        const auto tupleStrides =
            generate_tuple([&](auto I) { return strides[I]; }, Number<NumInvariantDim>{});

        auto raw_grid_desc = make_naive_tensor_descriptor(tupleLengths, tupleStrides);

        auto grid_desc_m = transform_tensor_descriptor(
            raw_grid_desc,
            make_tuple(make_merge_transform(tupleLengths)),
            make_tuple(typename arithmetic_sequence_gen<0, NumInvariantDim, 1>::type{}),
            make_tuple(Sequence<0>{}));

        const auto invariantLength = grid_desc_m.GetLength(Number<0>{});

        const auto mPad =
            math::integer_least_multiple(invariantLength, M_BlockTileSize) - invariantLength;

        auto grid_desc_m_padded = transform_tensor_descriptor(
            grid_desc_m,
            make_tuple(make_right_pad_transform(invariantLength, mPad)),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0>{}));

        return (grid_desc_m_padded);
    };

    using XYGridDesc_M_K      = decltype(MakeXY2dDescriptor({1}, {1}, 1, 1));
    using ScaleBiasGridDesc_M = decltype(MakeScaleBiasMeanVar1dDescriptor({1}, {1}));
    using MeanVarGridDesc_M   = ScaleBiasGridDesc_M;

    struct Argument : public BaseArgument
    {
        Argument(const std::array<index_t, Rank> xyLengths,
                 const std::array<index_t, Rank> xStrides,
                 const std::array<index_t, Rank> dyStrides,
                 const std::array<index_t, Rank> dxStrides,
                 const std::array<int, NumBatchNormReduceDim> reduceDims,
                 const std::array<ck::index_t, NumInvariantDim> bnScaleBiasMeanVarLengths,
                 const std::array<ck::index_t, NumInvariantDim> bnScaleStrides,
                 const std::array<ck::index_t, NumInvariantDim> bnBiasStrides,
                 const std::array<ck::index_t, NumInvariantDim> bnMeanVarStrides,
                 const XDataType* p_x,
                 const DyDataType* p_dy,
                 const ScaleDataType* p_scale,
                 const MeanVarDataType* p_savedMean,
                 const MeanVarDataType* p_savedInvVar,
                 const DyElementwiseOp dy_elementwise_op,
                 double epsilon,
                 DxDataType* p_dx,
                 ScaleDataType* p_dscale,
                 BiasDataType* p_dbias)
            : bnScaleBiasMeanVarLengths_(bnScaleBiasMeanVarLengths),
              bnScaleStrides_(bnScaleStrides),
              bnBiasStrides_(bnBiasStrides),
              bnMeanVarStrides_(bnMeanVarStrides),
              p_x_(p_x),
              p_dy_(p_dy),
              p_scale_(p_scale),
              p_savedMean_(p_savedMean),
              p_savedInvVar_(p_savedInvVar),
              dy_elementwise_op_(dy_elementwise_op),
              p_dx_(p_dx),
              p_dscale_(p_dscale),
              p_dbias_(p_dbias)
        {
            xyLengths_ =
                shuffle_tensor_dimensions<Rank, NumBatchNormReduceDim>(xyLengths, reduceDims);
            xStrides_ =
                shuffle_tensor_dimensions<Rank, NumBatchNormReduceDim>(xStrides, reduceDims);
            dyStrides_ =
                shuffle_tensor_dimensions<Rank, NumBatchNormReduceDim>(dyStrides, reduceDims);
            dxStrides_ =
                shuffle_tensor_dimensions<Rank, NumBatchNormReduceDim>(dxStrides, reduceDims);

            std::tie(invariant_length, reduce_length) =
                get_2d_lengths<Rank, NumBatchNormReduceDim>(xyLengths_);

            epsilon_ = type_convert<AccDataType>(epsilon);

            haveSavedMeanInvVar_ = (p_savedMean_ != nullptr && p_savedInvVar_ != nullptr);

            if(UseMultiblockInK)
            {
                int iterations = 1;
                while(true)
                {
                    int testBlkGroupSize = (reduce_length + (K_BlockTileSize * iterations) - 1) /
                                           (K_BlockTileSize * iterations);

                    // we want the blkGroupSize be not more than 128
                    if(testBlkGroupSize <= 128)
                        break;

                    iterations++;
                };

                blkGroupSize = (reduce_length + (K_BlockTileSize * iterations) - 1) /
                               (K_BlockTileSize * iterations);

                numBlockTileIteration = iterations;
            }
            else
            {
                blkGroupSize          = 1;
                numBlockTileIteration = (reduce_length + K_BlockTileSize - 1) / K_BlockTileSize;
            };

            gridSize = (invariant_length + M_BlockTileSize - 1) / M_BlockTileSize * blkGroupSize;

            x_grid_desc_m_k =
                MakeXY2dDescriptor(xyLengths_, xStrides_, blkGroupSize, numBlockTileIteration);
            dy_grid_desc_m_k =
                MakeXY2dDescriptor(xyLengths_, dyStrides_, blkGroupSize, numBlockTileIteration);
            dx_grid_desc_m_k =
                MakeXY2dDescriptor(xyLengths_, dxStrides_, blkGroupSize, numBlockTileIteration);

            scale_grid_desc_m =
                MakeScaleBiasMeanVar1dDescriptor(bnScaleBiasMeanVarLengths, bnScaleStrides);
            bias_grid_desc_m =
                MakeScaleBiasMeanVar1dDescriptor(bnScaleBiasMeanVarLengths, bnBiasStrides);
            mean_var_grid_desc_m =
                MakeScaleBiasMeanVar1dDescriptor(bnScaleBiasMeanVarLengths, bnMeanVarStrides);
        }

        AccDataType epsilon_;

        bool haveSavedMeanInvVar_;

        std::array<index_t, Rank> xyLengths_;
        std::array<index_t, Rank> xStrides_;
        std::array<index_t, Rank> dyStrides_;
        std::array<index_t, Rank> dxStrides_;

        std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarLengths_;
        std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleStrides_;
        std::array<index_t, Rank - NumBatchNormReduceDim> bnBiasStrides_;
        std::array<index_t, Rank - NumBatchNormReduceDim> bnMeanVarStrides_;

        const XDataType* p_x_;
        const DyDataType* p_dy_;
        const ScaleDataType* p_scale_;
        const MeanVarDataType* p_savedMean_;
        const MeanVarDataType* p_savedInvVar_;
        const DyElementwiseOp dy_elementwise_op_;
        DxDataType* p_dx_;
        ScaleDataType* p_dscale_;
        BiasDataType* p_dbias_;

        long_index_t invariant_length;
        long_index_t reduce_length;

        int blkGroupSize;
        int numBlockTileIteration;
        size_t gridSize;

        XYGridDesc_M_K x_grid_desc_m_k;
        XYGridDesc_M_K dy_grid_desc_m_k;
        XYGridDesc_M_K dx_grid_desc_m_k;
        ScaleBiasGridDesc_M scale_grid_desc_m;
        ScaleBiasGridDesc_M bias_grid_desc_m;
        MeanVarGridDesc_M mean_var_grid_desc_m;

        void* workspace_mean;
        void* workspace_variance;
        void* workspace_count;
        void* workspace_savedMean;
        void* workspace_savedInvVar;

        void* workspace_reduce_dscale;
        void* workspace_reduce_dbias;
    };

    size_t GetWorkSpaceSize(const BaseArgument* pArg) const override
    {
        const Argument* pArg_ = dynamic_cast<const Argument*>(pArg);

        size_t workspace_size = 0;

        if(UseMultiblockInK && pArg_->blkGroupSize > 1)
        {
            // workspace for the partial reduced result for dscale
            workspace_size +=
                pArg_->invariant_length * pArg_->blkGroupSize * sizeof(ScaleDataType) + 64;

            // workspace for the partial reduced result for dbias
            workspace_size +=
                pArg_->invariant_length * pArg_->blkGroupSize * sizeof(BiasDataType) + 64;

            if(!pArg_->haveSavedMeanInvVar_)
            {
                // workspace for welford intermediate mean
                workspace_size +=
                    pArg_->invariant_length * pArg_->blkGroupSize * sizeof(MeanVarDataType) + 64;

                // workspace for welford intermediate variance
                workspace_size +=
                    pArg_->invariant_length * pArg_->blkGroupSize * sizeof(MeanVarDataType) + 64;

                // workspace for welford intermediate count
                workspace_size +=
                    pArg_->invariant_length * pArg_->blkGroupSize * sizeof(int32_t) + 64;

                // workspace for welford result mean
                workspace_size += pArg_->invariant_length * sizeof(MeanVarDataType) + 64;

                // workspace for welford result inv_variance
                workspace_size += pArg_->invariant_length * sizeof(MeanVarDataType) + 64;
            };
        }

        return (workspace_size);
    };

    void SetWorkSpacePointer(BaseArgument* pArg, void* p_workspace) const override
    {
        Argument* pArg_ = dynamic_cast<Argument*>(pArg);

        pArg_->p_workspace_ = p_workspace;

        index_t space_sz;

        // setup buffer for the partial reduced result for dscale
        pArg_->workspace_reduce_dscale = pArg_->p_workspace_;

        space_sz = pArg_->invariant_length * pArg_->blkGroupSize * sizeof(ScaleDataType);
        space_sz = math::integer_least_multiple(space_sz, 64);

        // setup buffer for the partial reduced result for dbias
        pArg_->workspace_reduce_dbias =
            reinterpret_cast<char*>(pArg_->workspace_reduce_dscale) + space_sz;

        if(UseMultiblockInK && pArg_->blkGroupSize > 1)
        {
            space_sz = pArg_->invariant_length * pArg_->blkGroupSize * sizeof(BiasDataType);
            space_sz = math::integer_least_multiple(space_sz, 64);

            // setup buffer for welford intermediate mean
            pArg_->workspace_mean =
                reinterpret_cast<char*>(pArg_->workspace_reduce_dbias) + space_sz;

            space_sz = pArg_->invariant_length * pArg_->blkGroupSize * sizeof(MeanVarDataType);
            space_sz = math::integer_least_multiple(space_sz, 64);

            // setup buffer for welford intermediate varirance
            pArg_->workspace_variance =
                reinterpret_cast<char*>(pArg_->workspace_mean) + space_sz;

            space_sz = pArg_->invariant_length * pArg_->blkGroupSize * sizeof(MeanVarDataType);
            space_sz = math::integer_least_multiple(space_sz, 64);

            // setup buffer for welford intermediate count
            pArg_->workspace_count =
                reinterpret_cast<char*>(pArg_->workspace_variance) + space_sz;

            space_sz = pArg_->invariant_length * pArg_->blkGroupSize * sizeof(int32_t);
            space_sz = math::integer_least_multiple(space_sz, 64);

            // setup buffer for welford result mean
            pArg_->workspace_savedMean =
                reinterpret_cast<char*>(pArg_->workspace_count) + space_sz;

            space_sz = pArg_->invariant_length * sizeof(MeanVarDataType);
            space_sz = math::integer_least_multiple(space_sz, 64);

            // setup buffer for welford result inv_variance
            pArg_->workspace_savedInvVar =
                reinterpret_cast<char*>(pArg_->workspace_savedMean) + space_sz;
        };
    };

    struct Invoker : public BaseInvoker
    {
        float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{})
        {
            float avg_time = 0;

            const auto mean_var_count_grid_desc_m_g =
                DeviceBatchNormBwdImpl::MakeMultiblockFirstReduceOutputMG2dDescriptor(
                    arg.invariant_length, arg.blkGroupSize);

            const auto dscale_dbias_grid_desc_m_g =
                DeviceBatchNormBwdImpl::MakeMultiblockFirstReduceOutputMG2dDescriptor(
                    arg.invariant_length, arg.blkGroupSize);

            const auto mean_var_count_grid_desc_m_k =
                DeviceBatchNormBwdImpl::MakeMultiblockFinalReduceInputMK2dDescriptor(
                    arg.invariant_length, arg.blkGroupSize);

            const auto dscale_dbias_grid_desc_m_k =
                DeviceBatchNormBwdImpl::MakeMultiblockFinalReduceInputMK2dDescriptor(
                    arg.invariant_length, arg.blkGroupSize);

            using MeanVarCountGridDesc_M_G = decltype(mean_var_count_grid_desc_m_g);
            using MeanVarCountGridDesc_M_K = decltype(mean_var_count_grid_desc_m_k);
            using DscaleDbiasGridDesc_M_G  = decltype(dscale_dbias_grid_desc_m_g);
            using DscaleDbiasGridDesc_M_K  = decltype(dscale_dbias_grid_desc_m_k);

            using GridwiseWelfordSecondHalfReduceFirstHalf_ = GridwiseWelfordSecondHalfReduceFirstHalf<
                XDataType, DyDataType, AccDataType, ScaleDataType, BiasDataType, MeanVarDataType,
                DyElementwiseOp,
                XYGridDesc_M_K, MeanVarGridDesc_M, MeanVarCountGridDesc_M_K, DscaleDbiasGridDesc_M_G,
                BlockSize,
                MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize,
                XDyDxVectorDim, XSrcVectorSize, DySrcVectorSize, MeanVarSrcVectorSize>;

            using GridwiseReduceSecondHalfBatchNormBwdFinal_ = GridwiseReduceSecondHalfBatchNormBackwardFinal<
                XDataType, DyDataType, DxDataType, AccDataType, ScaleDataType, BiasDataType,
                MeanVarDataType, DyElementwiseOp,
                XYGridDesc_M_K, DscaleDbiasGridDesc_M_K, MeanVarGridDesc_M, ScaleBiasGridDesc_M,
                BlockSize,
                MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize,
                XDyDxVectorDim, XSrcVectorSize, DySrcVectorSize, DxDstVectorSize,
                ScaleSrcDstVectorSize, BiasDstVectorSize, MeanVarSrcVectorSize>;

            if(UseMultiblockInK && arg.blkGroupSize > 1)
            {
                using GetReduceCountPerThreadFunctor =
                    GetReduceCountPerThreadForMultiblockWelford<K_BlockTileSize, KThreadSliceSize>;

                GetReduceCountPerThreadFunctor get_reduce_count_per_thread(
                    arg.blkGroupSize, arg.numBlockTileIteration, arg.reduce_length);

                if(!arg.haveSavedMeanInvVar_)
                {
                    using GridwiseMultiblockWelfordFirstHalf_ = GridwiseMultiblockWelfordFirstHalf<
                        XDataType, AccDataType, MeanVarDataType,
                        XYGridDesc_M_K, MeanVarCountGridDesc_M_G, GetReduceCountPerThreadFunctor,
                        BlockSize,
                        MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize,
                        XDyDxVectorDim, XSrcVectorSize>;

                    const auto kern_multiblock_welford_first_half =
                        kernel_multiblock_welford_first_half<GridwiseMultiblockWelfordFirstHalf_,
                                                             XDataType,
                                                             MeanVarDataType,
                                                             XYGridDesc_M_K,
                                                             MeanVarCountGridDesc_M_G,
                                                             GetReduceCountPerThreadFunctor>;

                    avg_time += launch_and_time_kernel(
                        stream_config,
                        kern_multiblock_welford_first_half,
                        dim3(arg.gridSize),
                        dim3(BlockSize),
                        0,
                        arg.x_grid_desc_m_k,
                        mean_var_count_grid_desc_m_g,
                        get_reduce_count_per_thread,
                        arg.numBlockTileIteration,
                        arg.p_x_,
                        static_cast<MeanVarDataType*>(arg.workspace_mean),
                        static_cast<MeanVarDataType*>(arg.workspace_variance),
                        static_cast<int32_t*>(arg.workspace_count));
                };

                const auto kern_welford_second_half_reduce_first_half =
                    kernel_welford_second_half_reduce_first_half<
                        GridwiseWelfordSecondHalfReduceFirstHalf_,
                        XDataType, DyDataType, AccDataType, ScaleDataType, BiasDataType,
                        MeanVarDataType, DyElementwiseOp,
                        XYGridDesc_M_K, MeanVarGridDesc_M, MeanVarCountGridDesc_M_K,
                        DscaleDbiasGridDesc_M_G>;

                const auto kern_reduce_second_half_batchnorm_backward_final =
                    kernel_reduce_second_half_batchnorm_backward_final<
                        GridwiseReduceSecondHalfBatchNormBwdFinal_,
                        XDataType, DyDataType, DxDataType, ScaleDataType, BiasDataType,
                        MeanVarDataType, DyElementwiseOp,
                        XYGridDesc_M_K, DscaleDbiasGridDesc_M_K, MeanVarGridDesc_M,
                        ScaleBiasGridDesc_M>;

                index_t numDscaleDbiasBlockTileIteration =
                    (arg.blkGroupSize + KThreadClusterSize - 1) / KThreadClusterSize;

                avg_time += launch_and_time_kernel(
                    stream_config,
                    kern_welford_second_half_reduce_first_half,
                    dim3(arg.gridSize),
                    dim3(BlockSize),
                    0,
                    arg.x_grid_desc_m_k,
                    arg.dy_grid_desc_m_k,
                    arg.mean_var_grid_desc_m,
                    mean_var_count_grid_desc_m_k,
                    dscale_dbias_grid_desc_m_g,
                    arg.blkGroupSize,
                    arg.numBlockTileIteration,
                    numDscaleDbiasBlockTileIteration,
                    arg.epsilon_,
                    arg.haveSavedMeanInvVar_,
                    arg.haveSavedMeanInvVar_ ? arg.p_savedMean_ : nullptr,
                    arg.haveSavedMeanInvVar_ ? arg.p_savedInvVar_ : nullptr,
                    arg.haveSavedMeanInvVar_
                        ? nullptr
                        : static_cast<const MeanVarDataType*>(arg.workspace_mean),
                    arg.haveSavedMeanInvVar_
                        ? nullptr
                        : static_cast<const MeanVarDataType*>(arg.workspace_variance),
                    arg.haveSavedMeanInvVar_
                        ? nullptr
                        : static_cast<const int32_t*>(arg.workspace_count),
                    arg.dy_elementwise_op_,
                    arg.haveSavedMeanInvVar_
                        ? nullptr
                        : static_cast<MeanVarDataType*>(arg.workspace_savedMean),
                    arg.haveSavedMeanInvVar_
                        ? nullptr
                        : static_cast<MeanVarDataType*>(arg.workspace_savedInvVar),
                    arg.p_x_,
                    arg.p_dy_,
                    static_cast<ScaleDataType*>(arg.workspace_reduce_dscale),
                    static_cast<BiasDataType*>(arg.workspace_reduce_dbias));

                avg_time += launch_and_time_kernel(
                    stream_config,
                    kern_reduce_second_half_batchnorm_backward_final,
                    dim3(arg.gridSize),
                    dim3(BlockSize),
                    0,
                    arg.x_grid_desc_m_k,
                    arg.dy_grid_desc_m_k,
                    arg.dx_grid_desc_m_k,
                    dscale_dbias_grid_desc_m_k,
                    arg.mean_var_grid_desc_m,
                    arg.scale_grid_desc_m,
                    arg.bias_grid_desc_m,
                    arg.blkGroupSize,
                    arg.reduce_length,
                    arg.numBlockTileIteration,
                    numDscaleDbiasBlockTileIteration,
                    static_cast<const ScaleDataType*>(arg.workspace_reduce_dscale),
                    static_cast<const BiasDataType*>(arg.workspace_reduce_dbias),
                    arg.haveSavedMeanInvVar_
                        ? arg.p_savedMean_
                        : static_cast<const MeanVarDataType*>(arg.workspace_savedMean),
                    arg.haveSavedMeanInvVar_
                        ? arg.p_savedInvVar_
                        : static_cast<const MeanVarDataType*>(arg.workspace_savedInvVar),
                    arg.p_x_,
                    arg.p_dy_,
                    arg.p_scale_,
                    arg.dy_elementwise_op_,
                    arg.p_dx_,
                    arg.p_dscale_,
                    arg.p_dbias_);
            }
            else
            {
                using GetReduceCountPerThreadFunctor =
                    GetReduceCountPerThreadForBlockwiseWelford<K_BlockTileSize, KThreadSliceSize>;

                GetReduceCountPerThreadFunctor get_reduce_count_per_thread(
                    arg.numBlockTileIteration, arg.reduce_length);

                using GridwiseBatchNormBackwardWithBlockwiseWelford_ =
                    GridwiseBatchNormBackwardWithBlockwiseWelford<
                        XDataType, DyDataType, DxDataType, AccDataType, ScaleDataType, BiasDataType,
                        MeanVarDataType, DyElementwiseOp,
                        XYGridDesc_M_K, ScaleBiasGridDesc_M, MeanVarGridDesc_M,
                        GetReduceCountPerThreadFunctor,
                        BlockSize,
                        MThreadClusterSize, KThreadClusterSize, MThreadSliceSize, KThreadSliceSize,
                        XDyDxVectorDim, XSrcVectorSize, DySrcVectorSize, DxDstVectorSize,
                        ScaleSrcDstVectorSize, BiasDstVectorSize, MeanVarSrcVectorSize>;

                const auto kern_batchnorm_bwd = kernel_batchnorm_backward_with_blockwise_welford<
                    GridwiseBatchNormBackwardWithBlockwiseWelford_,
                    XDataType, DyDataType, DxDataType, AccDataType, ScaleDataType, BiasDataType,
                    MeanVarDataType, DyElementwiseOp,
                    XYGridDesc_M_K, ScaleBiasGridDesc_M, MeanVarGridDesc_M,
                    GetReduceCountPerThreadFunctor>;

                avg_time += launch_and_time_kernel(stream_config,
                                                   kern_batchnorm_bwd,
                                                   dim3(arg.gridSize),
                                                   dim3(BlockSize),
                                                   0,
                                                   arg.x_grid_desc_m_k,
                                                   arg.dy_grid_desc_m_k,
                                                   arg.dx_grid_desc_m_k,
                                                   arg.scale_grid_desc_m,
                                                   arg.bias_grid_desc_m,
                                                   arg.mean_var_grid_desc_m,
                                                   get_reduce_count_per_thread,
                                                   arg.reduce_length,
                                                   arg.numBlockTileIteration,
                                                   arg.epsilon_,
                                                   arg.p_x_,
                                                   arg.p_dy_,
                                                   arg.p_scale_,
                                                   arg.haveSavedMeanInvVar_,
                                                   arg.p_savedMean_,
                                                   arg.p_savedInvVar_,
                                                   arg.dy_elementwise_op_,
                                                   arg.p_dx_,
                                                   arg.p_dscale_,
                                                   arg.p_dbias_);
            };

            return (avg_time);
        };

        float Run(const BaseArgument* pArg,
                  const StreamConfig& stream_config = StreamConfig{}) override
        {
            return Run(*dynamic_cast<const Argument*>(pArg), stream_config);
        };
    };

    bool IsSupportedArgument(const BaseArgument* pArg) override
    {
        const Argument* pArg_ = dynamic_cast<const Argument*>(pArg);

        if constexpr(XDyDxVectorDim == 0)
        {
            if(pArg_->xStrides_[NumInvariantDim - 1] != 1 ||
               pArg_->dyStrides_[NumInvariantDim - 1] != 1 ||
               pArg_->dxStrides_[NumInvariantDim - 1] != 1)
                return false;

            if(pArg_->xyLengths_[NumInvariantDim - 1] % XSrcVectorSize != 0 ||
               pArg_->xyLengths_[NumInvariantDim - 1] % DySrcVectorSize != 0 ||
               pArg_->xyLengths_[NumInvariantDim - 1] % DxDstVectorSize != 0)
                return false;
        }
        else
        {
            if(pArg_->xStrides_[Rank - 1] != 1 || pArg_->dyStrides_[Rank - 1] != 1 ||
               pArg_->dxStrides_[Rank - 1] != 1)
                return false;

            if(pArg_->xyLengths_[Rank - 1] % XSrcVectorSize != 0 ||
               pArg_->xyLengths_[Rank - 1] % DySrcVectorSize != 0 ||
               pArg_->xyLengths_[Rank - 1] % DxDstVectorSize != 0)
                return false;
        };

        if(pArg_->bnScaleStrides_[NumInvariantDim - 1] != 1 && ScaleSrcDstVectorSize != 1)
            return false;

        if(pArg_->bnBiasStrides_[NumInvariantDim - 1] != 1 && BiasDstVectorSize != 1)
            return false;

        if(pArg_->bnScaleBiasMeanVarLengths_[NumInvariantDim - 1] % ScaleSrcDstVectorSize != 0)
            return false;

        if(pArg_->bnScaleBiasMeanVarLengths_[NumInvariantDim - 1] % BiasDstVectorSize != 0)
            return false;

        if(pArg_->haveSavedMeanInvVar_)
        {
            if(pArg_->bnMeanVarStrides_[NumInvariantDim - 1] != 1 && MeanVarSrcVectorSize != 1)
                return false;

            if(pArg_->bnScaleBiasMeanVarLengths_[NumInvariantDim - 1] % MeanVarSrcVectorSize != 0)
                return false;
        };

        bool is_valid = true;

        static_for<0, NumInvariantDim, 1>{}([&](auto I) {
            if(pArg_->xyLengths_[I] != pArg_->bnScaleBiasMeanVarLengths_[I])
                is_valid = false;
        });

        if(!is_valid)
            return false;

        return true;
    };

    std::unique_ptr<BaseArgument>
    MakeArgumentPointer(const std::array<index_t, Rank> xyLengths,
                        const std::array<index_t, Rank> xStrides,
                        const std::array<index_t, Rank> dyStrides,
                        const std::array<index_t, Rank> dxStrides,
                        const std::array<int, NumBatchNormReduceDim> reduceDims,
                        const std::array<ck::index_t, NumInvariantDim> bnScaleBiasMeanVarLengths,
                        const std::array<ck::index_t, NumInvariantDim> bnScaleStrides,
                        const std::array<ck::index_t, NumInvariantDim> bnBiasStrides,
                        const std::array<ck::index_t, NumInvariantDim> bnMeanVarStrides,
                        const void* p_x,
                        const void* p_dy,
                        const void* p_scale,
                        const void* p_savedMean,
                        const void* p_savedInvVar,
                        double epsilon,
                        const DyElementwiseOp dy_elementwise_op,
                        void* p_dx,
                        void* p_dscale,
                        void* p_dbias) override
    {
        return std::make_unique<Argument>(xyLengths,
                                          xStrides,
                                          dyStrides,
                                          dxStrides,
                                          reduceDims,
                                          bnScaleBiasMeanVarLengths,
                                          bnScaleStrides,
                                          bnBiasStrides,
                                          bnMeanVarStrides,
                                          static_cast<const XDataType*>(p_x),
                                          static_cast<const DyDataType*>(p_dy),
                                          static_cast<const ScaleDataType*>(p_scale),
                                          static_cast<const MeanVarDataType*>(p_savedMean),
                                          static_cast<const MeanVarDataType*>(p_savedInvVar),
                                          dy_elementwise_op,
                                          epsilon,
                                          static_cast<DxDataType*>(p_dx),
                                          static_cast<ScaleDataType*>(p_dscale),
                                          static_cast<BiasDataType*>(p_dbias));
    };

    std::unique_ptr<BaseInvoker> MakeInvokerPointer() override
    {
        return std::make_unique<Invoker>();
    };

    std::string GetTypeString() const override
    {
        auto str = std::stringstream();

        // clang-format off
        str << "DeviceBatchNormBwdImpl<" << BlockSize << ",";
        str << "M_C" << MThreadClusterSize << "_S" << MThreadSliceSize << ",";
        str << "K_C" << KThreadClusterSize << "_S" << KThreadSliceSize << ",";
        str << "XDyDxVectorDim_" << XDyDxVectorDim << ",";
        str << "VectorSize_X" << XSrcVectorSize << "_scale_" << ScaleSrcDstVectorSize
            << "_bias_" << BiasDstVectorSize << "_mean_var_" << MeanVarSrcVectorSize
            << "_Dx_" << DxDstVectorSize << ">";
        // clang-format on

        return str.str();
    }
};

} // namespace device
} // namespace tensor_operation
} // namespace ck
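GetWorkSpaceSize and SetWorkSpacePointer above share one convention: every partial-result buffer is budgeted with 64 bytes of slack, and the start of each sub-buffer is placed at the previous buffer's size rounded up to a multiple of 64, so all of them are carved out of a single device allocation. Below is a stand-alone sketch of that carving pattern in plain C++; the float element type, the round_up_64 helper and the two-buffer case are illustrative only, not CK code.

#include <cstddef>
#include <vector>

// like math::integer_least_multiple(n, 64) in the implementation above
static std::size_t round_up_64(std::size_t n) { return (n + 63) / 64 * 64; }

int main()
{
    const std::size_t invariant_length = 1024, blk_group_size = 8;
    const std::size_t dscale_bytes = invariant_length * blk_group_size * sizeof(float);
    const std::size_t dbias_bytes  = invariant_length * blk_group_size * sizeof(float);

    // GetWorkSpaceSize: each buffer is budgeted with 64 extra bytes of slack
    const std::size_t total = (dscale_bytes + 64) + (dbias_bytes + 64);
    std::vector<char> workspace(total);

    // SetWorkSpacePointer: sub-buffers are carved out back to back,
    // each offset advanced by the previous size rounded up to 64 bytes
    char* reduce_dscale = workspace.data();
    char* reduce_dbias  = reduce_dscale + round_up_64(dscale_bytes);
    (void)reduce_dbias;
}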
include/ck/tensor_operation/gpu/device/impl/device_batchnorm_forward_impl.hpp

@@ -42,8 +42,15 @@ template <typename XDataType,
           index_t ScaleSrcVectorSize,
           index_t BiasSrcVectorSize,
           index_t MeanVarSrcDstVectorSize>
-struct DeviceBatchNormFwdImpl : public DeviceBatchNormFwd<Rank, NumBatchNormReduceDim, YElementwiseOp>
+struct DeviceBatchNormFwdImpl : public DeviceBatchNormFwd<XDataType,
+                                                          YDataType,
+                                                          AccDataType,
+                                                          ScaleDataType,
+                                                          BiasDataType,
+                                                          MeanVarDataType,
+                                                          YElementwiseOp,
+                                                          Rank,
+                                                          NumBatchNormReduceDim>
 {
     static_assert(Rank <= 6, "Bigger Rank size is not supported!");
     static_assert(BlockSize == MThreadClusterSize * KThreadClusterSize,
include/ck/tensor_operation/gpu/device/impl/device_conv2d_backward_weight_xdl_c_shuffle_nhwc_kyxc_nhwk.hpp

@@ -67,6 +67,8 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
                                       WeiElementwiseOperation,
                                       OutElementwiseOperation>
 {
+    static constexpr ck::index_t NDimSpatial = 2;
+
     using DeviceOp = DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_N_Ho_Wo_K;

@@ -107,18 +109,18 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
     static constexpr auto BBlockLdsN0PerBlock = NPerBlock / BBlockLdsN1PerBlock;
     static constexpr auto BBlockLdsN1Padding  = 4;

     static auto MakeABCGridDescriptor_A_K0_M_K1_B_K0_N_K1_C_M_N(
         ck::index_t N,
         ck::index_t K,
         ck::index_t C,
-        std::vector<ck::index_t> input_spatial_lengths,
-        std::vector<ck::index_t> filter_spatial_lengths,
-        std::vector<ck::index_t> output_spatial_lengths,
-        std::vector<ck::index_t> conv_filter_strides,
-        std::vector<ck::index_t> conv_filter_dilations,
-        std::vector<ck::index_t> input_left_pads,
-        std::vector<ck::index_t> input_right_pads,
+        std::array<ck::index_t, NDimSpatial> input_spatial_lengths,
+        std::array<ck::index_t, NDimSpatial> filter_spatial_lengths,
+        std::array<ck::index_t, NDimSpatial> output_spatial_lengths,
+        std::array<ck::index_t, NDimSpatial> conv_filter_strides,
+        std::array<ck::index_t, NDimSpatial> conv_filter_dilations,
+        std::array<ck::index_t, NDimSpatial> input_left_pads,
+        std::array<ck::index_t, NDimSpatial> input_right_pads,
         ck::index_t batch_k)
     {
         using namespace ck;

@@ -390,13 +392,13 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
                  ck::index_t N,
                  ck::index_t K,
                  ck::index_t C,
-                 std::vector<ck::index_t> input_spatial_lengths,
-                 std::vector<ck::index_t> filter_spatial_lengths,
-                 std::vector<ck::index_t> output_spatial_lengths,
-                 std::vector<ck::index_t> conv_filter_strides,
-                 std::vector<ck::index_t> conv_filter_dilations,
-                 std::vector<ck::index_t> input_left_pads,
-                 std::vector<ck::index_t> input_right_pads,
+                 std::array<ck::index_t, NDimSpatial> input_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> filter_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> output_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_strides,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_dilations,
+                 std::array<ck::index_t, NDimSpatial> input_left_pads,
+                 std::array<ck::index_t, NDimSpatial> input_right_pads,
                  ck::index_t M01,
                  ck::index_t N01,
                  InElementwiseOperation in_element_op,

@@ -473,11 +475,11 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
         index_t Conv_N_;
         index_t Conv_K_;
         index_t Conv_C_;

-        std::vector<index_t> output_spatial_lengths_;
-        std::vector<index_t> filter_spatial_lengths_;
-        std::vector<index_t> conv_filter_strides_;
-        std::vector<index_t> input_left_pads_;
-        std::vector<index_t> input_right_pads_;
+        std::array<index_t, NDimSpatial> output_spatial_lengths_;
+        std::array<index_t, NDimSpatial> filter_spatial_lengths_;
+        std::array<index_t, NDimSpatial> conv_filter_strides_;
+        std::array<index_t, NDimSpatial> input_left_pads_;
+        std::array<index_t, NDimSpatial> input_right_pads_;

         index_t k_batch_;
     };

@@ -682,13 +684,13 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
                  ck::index_t N,
                  ck::index_t K,
                  ck::index_t C,
-                 std::vector<ck::index_t> input_spatial_lengths,
-                 std::vector<ck::index_t> filter_spatial_lengths,
-                 std::vector<ck::index_t> output_spatial_lengths,
-                 std::vector<ck::index_t> conv_filter_strides,
-                 std::vector<ck::index_t> conv_filter_dilations,
-                 std::vector<ck::index_t> input_left_pads,
-                 std::vector<ck::index_t> input_right_pads,
+                 std::array<ck::index_t, NDimSpatial> input_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> filter_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> output_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_strides,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_dilations,
+                 std::array<ck::index_t, NDimSpatial> input_left_pads,
+                 std::array<ck::index_t, NDimSpatial> input_right_pads,
                  InElementwiseOperation in_element_op,
                  WeiElementwiseOperation wei_element_op,
                  OutElementwiseOperation out_element_op,

@@ -724,13 +726,13 @@ struct DeviceConv2dBwdWeightXdl_C_Shuffle_Input_N_Hi_Wi_C_Weight_K_Y_X_C_Output_
                  ck::index_t N,
                  ck::index_t K,
                  ck::index_t C,
-                 std::vector<ck::index_t> input_spatial_lengths,
-                 std::vector<ck::index_t> filter_spatial_lengths,
-                 std::vector<ck::index_t> output_spatial_lengths,
-                 std::vector<ck::index_t> conv_filter_strides,
-                 std::vector<ck::index_t> conv_filter_dilations,
-                 std::vector<ck::index_t> input_left_pads,
-                 std::vector<ck::index_t> input_right_pads,
+                 std::array<ck::index_t, NDimSpatial> input_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> filter_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> output_spatial_lengths,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_strides,
+                 std::array<ck::index_t, NDimSpatial> conv_filter_dilations,
+                 std::array<ck::index_t, NDimSpatial> input_left_pads,
+                 std::array<ck::index_t, NDimSpatial> input_right_pads,
                  InElementwiseOperation in_element_op,
                  WeiElementwiseOperation wei_element_op,
                  OutElementwiseOperation out_element_op,