gaoqiong / composable_kernel — Commits

Commit 1dbdab56, authored Aug 18, 2022 by Jing Zhang
merge develop
Parents: d2e49b23, bac7df8f
Changes: 192

Showing 20 changed files with 2449 additions and 357 deletions (+2449, -357)
example/34_batchnorm/CMakeLists.txt                                        +2    -0
example/34_batchnorm/README.md                                             +56   -0
example/34_batchnorm/batchnorm_common.hpp                                  +181  -0
example/34_batchnorm/batchnorm_forward_impl.hpp                            +295  -0
example/34_batchnorm/batchnorm_forward_nhwc.cpp                            +466  -0
example/34_batchnorm/batchnorm_infer_impl.hpp                              +119  -0
example/34_batchnorm/batchnorm_infer_nhwc.cpp                              +346  -0
example/35_splitK_gemm/CMakeLists.txt                                      +0    -0
example/35_splitK_gemm/splitK_gemm_xdl_bfp16.cpp                           +0    -0
example/35_splitK_gemm/splitK_gemm_xdl_fp16.cpp                            +0    -0
example/35_splitK_gemm/splitK_gemm_xdl_fp32.cpp                            +0    -0
example/35_splitK_gemm/splitK_gemm_xdl_int8.cpp                            +0    -0
example/CMakeLists.txt                                                     +9    -4
include/ck/tensor_description/tensor_descriptor.hpp                        +7    -0
include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp            +371  -0
include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops_skip_b_lds.hpp +321  -0
include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp                +96   -0
include/ck/tensor_operation/gpu/block/blockwise_welford.hpp                +108  -0
include/ck/tensor_operation/gpu/block/reduction_functions_blockwise.hpp    +72   -0
include/ck/tensor_operation/gpu/device/device_5ary_elementwise.hpp         +0    -353
example/34_batchnorm/CMakeLists.txt (new file, mode 100644)

add_example_executable(example_batchnorm_forward batchnorm_forward_nhwc.cpp)
add_example_executable(example_batchnorm_infer batchnorm_infer_nhwc.cpp)
example/34_batchnorm/README.md (new file, mode 100644)

# Instructions for ```batchnorm nhwc``` Example

## Run ```batchnorm forward nhwc```
```bash
# -D <xxx> : input 4-d tensor lengths
# -v <x> : verification (0=no, 1=yes)
#arg1: data type (0: fp16, 1: fp32, 3: int8, 5: bp16, 6: fp64)
#arg2: 1/0 to indicate whether to update the moving average and variance (0=no, 1=yes)
#arg3: 1/0 to indicate whether to save result mean/invVariance (0=no, 1=yes)
#arg4: initialization (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)
#arg5: time kernel (0=no, 1=yes)
./bin/example_batchnorm_forward -D 128,16,16,1024 -v 1 0 0 1 2 1
```

Result
```
./bin/example_batchnorm_forward -D 128,16,16,1024 -v 1 0 0 1 2 1
launch_and_time_kernel: grid_dim {64, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
launch_and_time_kernel: grid_dim {120, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
launch_and_time_kernel: grid_dim {120, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
Perf: 2.08231 ms, 354.519 GB/s
```

Result
```
./bin/example_batchnorm_forward -D 128,16,16,1024 -v 1 0 1 0 2 0
echo $?
0
```

## Run ```batchnorm infer nhwc```
```bash
# -D <xxx> : input 4-d tensor lengths
# -v <x> : verification (0=no, 1=yes)
#arg1: data type (0: fp16, 1: fp32, 3: int8, 5: bp16, 6: fp64)
#arg2: initialization (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)
#arg3: time kernel (0=no, 1=yes)
./bin/example_batchnorm_infer -D 128,16,16,1024 -v 1 0 2 1
```

Result
```
./bin/example_batchnorm_infer -D 128,16,16,1024 -v 1 0 2 1
launch_and_time_kernel: grid_dim {120, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
Perf: 1.28235 ms, 523.329 GB/s
```
example/34_batchnorm/batchnorm_common.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <cassert>
#include <vector>
#include <array>
#include <type_traits>

#include "ck/utility/data_type.hpp"

// binary operation used to calculate invVariance from mean and meansquare
struct InvVariance
{
    InvVariance(double epsilon) : epsilon_(epsilon){};

    template <typename T>
    __host__ __device__ constexpr void operator()(T& y, const T& mean, const T& meansquare) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");

        using ck::type_convert;
        using ck::math::sqrt;

        T tmp_epsilon = type_convert<T>(epsilon_);

        y = meansquare - mean * mean;
        y = 1.0f / sqrt(tmp_epsilon + y);
    };

    double epsilon_;
};

// (4-in, 2-out) element-wise operation used to update the moving average of mean and variance
struct MovingAverage
{
    MovingAverage(double factor) : factor_(factor){};

    template <typename T>
    __host__ __device__ constexpr void operator()(T& y0,
                                                  T& y1,
                                                  const T& mean,
                                                  const T& runningMean,
                                                  const T& meansquare,
                                                  const T& runningVariance) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");

        using ck::type_convert;

        T tmp_factor = type_convert<T>(factor_);

        T variance = meansquare - mean * mean;

        y0 = runningMean * (type_convert<T>(1.0f) - tmp_factor) + mean * tmp_factor;
        y1 = runningVariance * (type_convert<T>(1.0f) - tmp_factor) + variance * tmp_factor;
    };

    double factor_;
};

struct MovingAverageAndInvVariance
{
    MovingAverageAndInvVariance(double epsilon, double factor)
        : epsilon_(epsilon), factor_(factor){};

    template <typename T>
    __host__ __device__ constexpr void operator()(T& y0, // resultRunningMean
                                                  T& y1, // resultRunningVariance
                                                  T& y2, // saveInvVariance
                                                  const T& mean,
                                                  const T& runningMean,
                                                  const T& meansquare,
                                                  const T& runningVariance) const
    {
        static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
                      "Data type is not supported by this operation!");

        using ck::type_convert;
        using ck::math::sqrt;

        T tmp_epsilon = type_convert<T>(epsilon_);
        T tmp_factor  = type_convert<T>(factor_);

        T variance = meansquare - mean * mean;

        y0 = runningMean * (type_convert<T>(1.0f) - tmp_factor) + mean * tmp_factor;
        y1 = runningVariance * (type_convert<T>(1.0f) - tmp_factor) + variance * tmp_factor;
        y2 = 1.0f / sqrt(tmp_epsilon + variance);
    };

    double epsilon_;
    double factor_;
};

struct NormalizeInInfer
{
    NormalizeInInfer(double epsilon = 1e-4) : epsilon_(epsilon) {}

    template <typename T1, typename T2>
    __host__ __device__ constexpr void operator()(T1& y,
                                                  const T1& x,
                                                  const T2& mean,
                                                  const T2& variance,
                                                  const T2& gamma,
                                                  const T2& beta) const
    {
        static_assert(std::is_same<T2, float>::value || std::is_same<T2, double>::value,
                      "Data type is not supported by this operation!");

        using ck::type_convert;
        using ck::math::sqrt;

        T2 tmp_x, tmp_y;

        tmp_x = type_convert<T2>(x);

        tmp_y = ((tmp_x - mean) / sqrt(variance + type_convert<T2>(epsilon_))) * gamma + beta;
        y     = type_convert<T1>(tmp_y);
    };

    double epsilon_;
};

struct NormalizeInForward
{
    NormalizeInForward(double epsilon = 1e-4) : epsilon_(epsilon) {}

    template <typename T1, typename T2>
    __host__ __device__ constexpr void operator()(T1& y,
                                                  const T1& x,
                                                  const T2& mean,
                                                  const T2& meansquare,
                                                  const T2& gamma,
                                                  const T2& beta) const
    {
        static_assert(std::is_same<T2, float>::value || std::is_same<T2, double>::value,
                      "Data type is not supported by this operation!");

        using ck::type_convert;
        using ck::math::sqrt;

        T2 tmp_x, tmp_y;

        T2 variance = meansquare - mean * mean;

        tmp_x = type_convert<T2>(x);

        tmp_y = ((tmp_x - mean) / sqrt(variance + type_convert<T2>(epsilon_))) * gamma + beta;
        y     = type_convert<T1>(tmp_y);
    };

    double epsilon_;
};

template <int Rank, int NumReduceDim>
static inline std::array<int, Rank - NumReduceDim>
get_invariant_dims(const std::array<int, NumReduceDim>& reduceDims)
{
    int reduceFlag = 0;

    // flag the bits for the reduceDims
    for(int i = 0; i < NumReduceDim; i++)
    {
        reduceFlag |= 1 << reduceDims[i];
    };

    std::array<int, Rank - NumReduceDim> invariantDims;

    // collect invariant dimensions
    int dim = 0;
    for(int i = 0; i < Rank; i++)
        if((reduceFlag & (1 << i)) == 0)
        {
            invariantDims[dim] = i;
            dim++;
        };

    return invariantDims;
};
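The functors above all reduce to the same textbook identities: Var(x) = E[x²] − E[x]², invVariance = 1/sqrt(Var + ε), and the exponential moving-average update r' = r·(1 − α) + v·α. The following is a minimal, host-only sketch of that arithmetic in plain C++ (no ck dependency; all names here are illustrative, not part of the example code):

```cpp
// Standalone illustration of the math used by InvVariance / MovingAverage above.
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> x{1.0f, 2.0f, 3.0f, 4.0f};

    float mean = 0.0f, meansquare = 0.0f;
    for(float v : x)
    {
        mean += v;
        meansquare += v * v;
    }
    mean /= x.size();
    meansquare /= x.size();

    const float epsilon  = 1e-4f;
    const float variance = meansquare - mean * mean; // E[x^2] - E[x]^2
    const float invVar   = 1.0f / std::sqrt(epsilon + variance);

    // exponential moving average with factor alpha, as in MovingAverage
    const float alpha     = 0.1f;
    float runningMean     = 0.0f;
    float runningVariance = 1.0f;
    runningMean     = runningMean * (1.0f - alpha) + mean * alpha;
    runningVariance = runningVariance * (1.0f - alpha) + variance * alpha;

    std::printf("mean=%f var=%f invVar=%f runMean=%f runVar=%f\n",
                mean, variance, invVar, runningMean, runningVariance);
    return 0;
}
```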
example/34_batchnorm/batchnorm_forward_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <cassert>
#include <vector>

#include "ck/ck.hpp"
#include "ck/utility/reduction_operator.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/device_multiple_reduce_multiblock.hpp"
#include "ck/tensor_operation/gpu/device/device_elementwise.hpp"

#include "batchnorm_common.hpp"

template <typename InOutDataType,
          typename AccDataType,
          ck::index_t Rank,
          ck::index_t NumBatchNormReduceDim,
          bool fastest_dim_is_reduced = false>
int bnorm_fwd(bool time_kernel,
              bool updateMovingAverage,
              bool saveMeanAndInvVariance,
              const std::array<int, NumBatchNormReduceDim> reduceDims,
              const std::array<ck::index_t, Rank> xyLengths,
              const std::array<ck::index_t, Rank> xStrides,
              const std::array<ck::index_t, Rank> yStrides,
              const std::array<ck::index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarLengths,
              const std::array<ck::index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarStrides,
              const void* p_x,
              const void* p_scale,
              const void* p_bias,
              void* p_y,
              double exponentialAverageFactor,
              void* p_runningMean,
              void* p_runningVariance,
              double epsilon,
              void* p_saveMean,
              void* p_saveInvVariance,
              void* p_tmp_mean,
              void* p_tmp_meansquare)
{
    static_assert(NumBatchNormReduceDim < Rank,
                  "Invalid number of reduced dimensions for batchnorm!");

    constexpr ck::index_t NumScaleBiasMeanVarDim = Rank - NumBatchNormReduceDim;

    using InElementwiseOperation_Mean        = ck::tensor_operation::element_wise::PassThrough;
    using AccElementwiseOperation_Mean       = ck::tensor_operation::element_wise::UnaryDivide;
    using InElementwiseOperation_Meansquare  = ck::tensor_operation::element_wise::UnarySquare;
    using AccElementwiseOperation_Meansquare = ck::tensor_operation::element_wise::UnaryDivide;

    using DeviceMeanAndMeansquareInstance =
        ck::tensor_operation::device::DeviceMultipleReduceMultiBlock<
            2,
            InOutDataType,
            AccDataType,
            ck::Tuple<AccDataType, AccDataType>,
            Rank,
            NumBatchNormReduceDim,
            ck::reduce::Add,
            ck::Tuple<InElementwiseOperation_Mean, InElementwiseOperation_Meansquare>,
            ck::Tuple<AccElementwiseOperation_Mean, AccElementwiseOperation_Meansquare>,
            ck::InMemoryDataOperationEnum::Set,
            false, // PropagateNan
            256,
            16,
            16,
            1,
            1,
            fastest_dim_is_reduced ? 1 : 0,
            1,
            ck::Sequence<1, 1>>;

    using DeviceNormalizeInstance = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<InOutDataType, AccDataType, AccDataType, AccDataType, AccDataType>, // x, mean,
                                                                                      // meansquare,
                                                                                      // scale, bias
        ck::Tuple<InOutDataType>,                                                     // y
        NormalizeInForward,
        Rank,
        2,                           // MPerthread
        ck::Sequence<1, 1, 1, 1, 1>, // scalarPerVector: x, mean, meansquare, scale, bias
        ck::Sequence<1>>;            // scalarPerVector: y

    using DeviceInvVarianceInstance = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<AccDataType, AccDataType>, // mean, meansquare
        ck::Tuple<AccDataType>,              // invVariance
        InvVariance,
        NumScaleBiasMeanVarDim,
        2,                  // MPerthread
        ck::Sequence<1, 1>, // scalarPerVector: mean, meansquare
        ck::Sequence<1>>;   // scalarPerVector: invVariance

    using DeviceMovingAverageInstance = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<AccDataType, AccDataType, AccDataType, AccDataType>, // old moving mean, new mean,
                                                                       // old moving variance, new
                                                                       // meansquare
        ck::Tuple<AccDataType, AccDataType>, // updated moving mean, updated moving variance
        MovingAverage,
        NumScaleBiasMeanVarDim,
        4,                        // MPerthread
        ck::Sequence<1, 1, 1, 1>, // scalarPerVector: old moving mean, new mean, old moving
                                  // variance, new meansquare
        ck::Sequence<1, 1>>;      // scalarPerVector: updated moving mean, updated moving variance

    using DeviceMovingAverageAndInvVarianceInstance = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<AccDataType, AccDataType, AccDataType, AccDataType>, // old moving mean, new
                                                                       // mean, old moving
                                                                       // variance, new
                                                                       // meansquare
        ck::Tuple<AccDataType, AccDataType, AccDataType>, // updated moving mean, updated moving
                                                          // variance, invVariance
        MovingAverageAndInvVariance,
        NumScaleBiasMeanVarDim,
        4,                        // MPerthread
        ck::Sequence<1, 1, 1, 1>, // scalarPerVector: old moving mean, new mean, old moving
                                  // variance, new meansquare
        ck::Sequence<1, 1, 1>>;   // scalarPerVector: updated moving mean, updated moving variance

    auto invariantDims = get_invariant_dims<Rank, NumBatchNormReduceDim>(reduceDims);

    std::array<ck::index_t, Rank> aligned_scaleBiasMeanVarStrides{0};

    int i = 0;
    for(auto dim : invariantDims)
    {
        assert(xyLengths[dim] == bnScaleBiasMeanVarLengths[i]);

        aligned_scaleBiasMeanVarStrides[dim] = bnScaleBiasMeanVarStrides[i];
        i++;
    };

    int32_t reduceLength = 1;

    for(auto dim : reduceDims)
        reduceLength *= xyLengths[dim];

    int32_t invariantLength = 1;

    for(auto dim : invariantDims)
        invariantLength *= xyLengths[dim];

    size_t total_length = static_cast<size_t>(invariantLength) * reduceLength;

    float avg_time        = 0.0f;
    std::size_t num_bytes = 0;

    auto dev_mean_and_meansquare = DeviceMeanAndMeansquareInstance{};

    void* p_mean = saveMeanAndInvVariance ? p_saveMean : p_tmp_mean;

    const AccDataType alpha = ck::type_convert<AccDataType>(1.0f);
    const AccDataType beta  = ck::type_convert<AccDataType>(0.0f);

    auto argument_ptr1 = dev_mean_and_meansquare.MakeArgumentPointer(
        xyLengths,
        xStrides,
        bnScaleBiasMeanVarLengths,
        {bnScaleBiasMeanVarStrides, bnScaleBiasMeanVarStrides},
        reduceDims,
        {&alpha, &alpha},
        {&beta, &beta},
        p_x,
        {p_mean, p_tmp_meansquare},
        ck::make_tuple(InElementwiseOperation_Mean{}, InElementwiseOperation_Meansquare{}),
        ck::make_tuple(AccElementwiseOperation_Mean{reduceLength},
                       AccElementwiseOperation_Meansquare{reduceLength}));

    auto dev_normalize = DeviceNormalizeInstance{};

    auto argument_ptr2 = dev_normalize.MakeArgumentPointer(xyLengths,
                                                           {xStrides,
                                                            aligned_scaleBiasMeanVarStrides,
                                                            aligned_scaleBiasMeanVarStrides,
                                                            aligned_scaleBiasMeanVarStrides,
                                                            aligned_scaleBiasMeanVarStrides},
                                                           {yStrides},
                                                           {p_x, p_mean, p_tmp_meansquare, p_scale, p_bias},
                                                           {p_y},
                                                           NormalizeInForward{epsilon});

    if(!dev_mean_and_meansquare.IsSupportedArgument(argument_ptr1.get()) ||
       !dev_normalize.IsSupportedArgument(argument_ptr2.get()))
    {
        std::cout << "The runtime parameters seem not supported by the Device, exiting!"
                  << std::endl;

        return (-1);
    };

    auto invoker_ptr1 = dev_mean_and_meansquare.MakeInvokerPointer();
    auto invoker_ptr2 = dev_normalize.MakeInvokerPointer();

    avg_time += invoker_ptr1->Run(argument_ptr1.get(), StreamConfig{nullptr, time_kernel});
    avg_time += invoker_ptr2->Run(argument_ptr2.get(), StreamConfig{nullptr, time_kernel});

    num_bytes +=
        (total_length * sizeof(InOutDataType) + invariantLength * 2 * sizeof(AccDataType)) + // No.1
        (total_length * (1 * sizeof(InOutDataType) + 4 * sizeof(AccDataType)) +
         total_length * sizeof(InOutDataType)); // No.2

    if(saveMeanAndInvVariance && updateMovingAverage)
    {
        auto dev_moving_average_inv_variance = DeviceMovingAverageAndInvVarianceInstance{};

        auto argument_ptr3 = dev_moving_average_inv_variance.MakeArgumentPointer(
            bnScaleBiasMeanVarLengths,
            {bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides},
            {bnScaleBiasMeanVarStrides, bnScaleBiasMeanVarStrides, bnScaleBiasMeanVarStrides},
            {p_mean, p_runningMean, p_tmp_meansquare, p_runningVariance},
            {p_runningMean, p_runningVariance, p_saveInvVariance},
            MovingAverageAndInvVariance{epsilon, exponentialAverageFactor});

        if(!dev_moving_average_inv_variance.IsSupportedArgument(argument_ptr3.get()))
        {
            std::cout << "Runtime parameters not supported by the Device, exiting!" << std::endl;

            return (-1);
        };

        auto invoker_ptr3 = dev_moving_average_inv_variance.MakeInvokerPointer();

        avg_time += invoker_ptr3->Run(argument_ptr3.get(), StreamConfig{nullptr, time_kernel});

        num_bytes += invariantLength * (4 + 3) * sizeof(AccDataType) * 2; // No.5
    }
    else if(saveMeanAndInvVariance)
    {
        auto dev_inv_variance = DeviceInvVarianceInstance{};

        auto argument_ptr3 = dev_inv_variance.MakeArgumentPointer(
            bnScaleBiasMeanVarLengths,
            {bnScaleBiasMeanVarStrides, bnScaleBiasMeanVarStrides},
            {bnScaleBiasMeanVarStrides},
            {p_mean, p_tmp_meansquare},
            {p_saveInvVariance},
            InvVariance{epsilon});

        if(!dev_inv_variance.IsSupportedArgument(argument_ptr3.get()))
        {
            std::cout << "Runtime parameters not supported by the Device, exiting!" << std::endl;

            return (-1);
        };

        auto invoker_ptr3 = dev_inv_variance.MakeInvokerPointer();

        avg_time += invoker_ptr3->Run(argument_ptr3.get(), StreamConfig{nullptr, time_kernel});

        num_bytes += invariantLength * (2 + 1) * sizeof(AccDataType);
    }
    else if(updateMovingAverage)
    {
        auto dev_moving_average = DeviceMovingAverageInstance{};

        auto argument_ptr3 = dev_moving_average.MakeArgumentPointer(
            bnScaleBiasMeanVarLengths,
            {bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides,
             bnScaleBiasMeanVarStrides},
            {bnScaleBiasMeanVarStrides, bnScaleBiasMeanVarStrides},
            {p_mean, p_runningMean, p_tmp_meansquare, p_runningVariance},
            {p_runningMean, p_runningVariance},
            MovingAverage{exponentialAverageFactor});

        if(!dev_moving_average.IsSupportedArgument(argument_ptr3.get()))
        {
            std::cout << "Runtime parameters not supported by the Device, exiting!" << std::endl;

            return (-1);
        };

        auto invoker_ptr3 = dev_moving_average.MakeInvokerPointer();

        avg_time += invoker_ptr3->Run(argument_ptr3.get(), StreamConfig{nullptr, time_kernel});

        num_bytes += invariantLength * (4 + 2) * sizeof(AccDataType) * 2; // No.5
    };

    if(time_kernel)
    {
        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s" << std::endl;
    };

    return (0);
};
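bnorm_fwd composes three device kernels: a two-output reduction producing per-channel mean and mean-square, an elementwise normalization over the full tensor, and optionally an elementwise moving-average / invVariance update over the channel dimension. The following is a hedged, host-only sketch of the same three-stage decomposition for an NHWC layout (plain C++; the function name and signature are illustrative, not the ck API):

```cpp
// Host-side sketch of the 3-stage decomposition used by bnorm_fwd (illustrative only).
#include <cmath>
#include <cstddef>
#include <vector>

void bnorm_fwd_host(const std::vector<float>& x, // NHWC, size N*H*W*C
                    std::vector<float>& y,
                    std::vector<float>& mean,       // per channel, size C
                    std::vector<float>& meansquare, // per channel, size C
                    const std::vector<float>& scale,
                    const std::vector<float>& bias,
                    std::size_t C,
                    double epsilon)
{
    const std::size_t reduceLen = x.size() / C; // N*H*W

    // stage 1: per-channel mean and mean-square (DeviceMultipleReduceMultiBlock)
    for(std::size_t c = 0; c < C; ++c)
    {
        double sum = 0.0, sumsq = 0.0;
        for(std::size_t i = 0; i < reduceLen; ++i)
        {
            const double v = x[i * C + c];
            sum += v;
            sumsq += v * v;
        }
        mean[c]       = static_cast<float>(sum / reduceLen);
        meansquare[c] = static_cast<float>(sumsq / reduceLen);
    }

    // stage 2: elementwise normalization (NormalizeInForward)
    for(std::size_t i = 0; i < reduceLen; ++i)
        for(std::size_t c = 0; c < C; ++c)
        {
            const float var = meansquare[c] - mean[c] * mean[c];
            y[i * C + c] = (x[i * C + c] - mean[c]) /
                               std::sqrt(var + static_cast<float>(epsilon)) * scale[c] +
                           bias[c];
        }

    // stage 3 (optional, not shown): per-channel moving-average / invVariance update
}
```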
example/34_batchnorm/batchnorm_forward_nhwc.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <limits>
#include <iostream>
#include <vector>
#include <array>
#include <algorithm>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/host_common_util.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_forward_nhwc_c.hpp"

#include "batchnorm_forward_impl.hpp"

template <typename InOutDataType, typename AccDataType>
using ReferenceBatchNormFwdInstance =
    ck::tensor_operation::host::ReferenceBatchNormFwd_Input_N_H_W_C_Output_C<InOutDataType,
                                                                             AccDataType>;

static struct option long_options[] = {{"inOutLengths", required_argument, nullptr, 'D'},
                                       {"verify", required_argument, nullptr, 'v'},
                                       {"help", no_argument, nullptr, '?'},
                                       {nullptr, 0, nullptr, 0}};

class BatchNormFwdArg
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inOutLengths;

    bool do_verification = false;

    bool updateMovingAverage;
    bool saveMeanAndInvVariance;

    int data_type    = 0;
    int init_method  = 2;
    bool time_kernel = false;

    public:
    void show_usage(const char* cmd)
    {
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inOutLengths or -D, comma separated list of input tensor dimension "
                     "lengths, must have 4 integers for nhwc"
                  << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the batch-normalization "
                     "result by comparing with the host-based batch-normalization"
                  << std::endl;
        std::cout << "Arg1: data type (0: fp16, 1: fp32, 3: int8, 5: bp16, 6: fp64)" << std::endl;
        std::cout << "Arg2: 1/0 to indicate whether to update the moving average and variance "
                     "(0=no, 1=yes)"
                  << std::endl;
        std::cout << "Arg3: 1/0 to indicate whether to save the calculated mean and invVariance "
                     "(0=no, 1=yes)"
                  << std::endl;
        std::cout << "Arg4: init method used for bnScale and bnBias (0=no init, 1=single integer "
                     "value, 2=scope integer value, 3=decimal value)"
                  << std::endl;
        std::cout << "Arg5: time kernel (0=no, 1=yes)" << std::endl;
    };

    int processArgs(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        while(1)
        {
            ch = getopt_long(argc, argv, "D:v:", long_options, &option_index);
            if(ch == -1)
                break;

            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inOutLengths = getTypeValuesFromString<size_t>(optarg);

                if(inOutLengths.size() != 4)
                    throw std::runtime_error(
                        "NHWC tensor layout should have 4 length values specified!");
                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return (-1);
                };
                break;
            default: show_usage(argv[0]); return (-1);
            };
        };

        if(optind + 5 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        data_type              = std::atoi(argv[optind++]);
        updateMovingAverage    = std::atoi(argv[optind++]);
        saveMeanAndInvVariance = std::atoi(argv[optind++]);
        init_method            = std::atoi(argv[optind++]);
        time_kernel            = static_cast<bool>(std::atoi(argv[optind]));

        if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5 && data_type != 6)
            return (-1);

        return (0);
    };
};

using namespace ck;

template <typename InOutDataType, typename AccDataType>
bool bnorm_fwd_nhwc_test(bool do_verification,
                         int init_method,
                         bool time_kernel,
                         const std::vector<size_t> inOutLengths,
                         bool updateMovingAverage,
                         bool saveMeanAndInvVariance,
                         double averageFactor,
                         double epsilon)
{
    // for NHWC BatchNorm calculation of mean and meansquare
    constexpr int Rank         = 4;
    constexpr int NumReduceDim = 3;

    const std::vector<size_t> scaleBiasMeanVarLengths = {inOutLengths[3]};

    // input data of the batchnorm forward algorithm
    Tensor<InOutDataType> x(inOutLengths);
    Tensor<AccDataType> bnScale(scaleBiasMeanVarLengths);
    Tensor<AccDataType> bnBias(scaleBiasMeanVarLengths);

    // output data of the batchnorm forward algorithm
    Tensor<InOutDataType> y_ref(inOutLengths);
    Tensor<InOutDataType> y(inOutLengths);

    Tensor<AccDataType> resultSaveMean_ref(scaleBiasMeanVarLengths);
    Tensor<AccDataType> resultSaveInvVariance_ref(scaleBiasMeanVarLengths);

    Tensor<AccDataType> resultRunningMean_ref(scaleBiasMeanVarLengths);
    Tensor<AccDataType> resultRunningVariance_ref(scaleBiasMeanVarLengths);

    auto inOutStrides            = x.mDesc.GetStrides();
    auto scaleBiasMeanVarStrides = bnScale.mDesc.GetStrides();

    std::size_t num_thread = std::thread::hardware_concurrency();

    if(updateMovingAverage)
    {
        if constexpr(std::is_same<InOutDataType, int8_t>::value)
        {
            x.GenerateTensorValue(GeneratorTensor_2<InOutDataType>{-5, 5}, num_thread);

            const float x_mean       = 0.0f;
            const float x_stddev     = 2.5f;
            const float noise_stddev = 0.04f;

            resultRunningMean_ref.GenerateTensorValue(
                GeneratorTensor_4<AccDataType>{x_mean, noise_stddev}, num_thread);

            resultRunningVariance_ref.GenerateTensorValue(
                GeneratorTensor_4<AccDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
        }
        else
        {
            const float x_mean       = 0.0f;
            const float x_stddev     = 1.0f;
            const float noise_stddev = 0.04f;

            // input data in normal distribution
            x.GenerateTensorValue(GeneratorTensor_4<InOutDataType>{x_mean, x_stddev}, num_thread);

            // initialize the runningMean to be values with tiny variation to the mean of the x
            // values
            resultRunningMean_ref.GenerateTensorValue(
                GeneratorTensor_4<AccDataType>{x_mean, noise_stddev}, num_thread);

            // initialize the runningVariance to be values with tiny variation to the variance of
            // the x values
            resultRunningVariance_ref.GenerateTensorValue(
                GeneratorTensor_4<AccDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
        };
    }
    else
    {
        if constexpr(std::is_same<InOutDataType, int8_t>::value)
            x.GenerateTensorValue(GeneratorTensor_2<InOutDataType>{-5, 5}, num_thread);
        else
            x.GenerateTensorValue(GeneratorTensor_3<InOutDataType>{-5.0f, 5.0f}, num_thread);
    };

    if(do_verification)
    {
        switch(init_method)
        {
        case 0:
            bnScale.GenerateTensorValue(GeneratorTensor_0<AccDataType>{}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_0<AccDataType>{}, num_thread);
            break;
        case 1:
            bnScale.GenerateTensorValue(GeneratorTensor_1<AccDataType>{1}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0}, num_thread);
            break;
        case 2:
            bnScale.GenerateTensorValue(GeneratorTensor_2<AccDataType>{-5, 5}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_2<AccDataType>{-5, 5}, num_thread);
            break;
        default:
            bnScale.GenerateTensorValue(GeneratorTensor_3<AccDataType>{-5.0f, 5.0f}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_3<AccDataType>{-5.0f, 5.0f}, num_thread);
        }
    };

    // these buffers are usually provided by the user application
    DeviceMem x_dev(sizeof(InOutDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(InOutDataType) * y.mDesc.GetElementSpaceSize());
    DeviceMem bnScale_dev(sizeof(AccDataType) * bnScale.mDesc.GetElementSpaceSize());
    DeviceMem bnBias_dev(sizeof(AccDataType) * bnBias.mDesc.GetElementSpaceSize());

    // mean_dev or resultSaveMean_dev
    DeviceMem resultSaveMean_dev(sizeof(AccDataType) *
                                 resultSaveMean_ref.mDesc.GetElementSpaceSize());
    // meansquare_dev or resultSaveInvVariance_dev
    DeviceMem resultSaveInvVariance_dev(sizeof(AccDataType) *
                                        resultSaveInvVariance_ref.mDesc.GetElementSpaceSize());
    // resultRunningMean_dev
    DeviceMem resultRunningMean_dev(sizeof(AccDataType) *
                                    resultRunningMean_ref.mDesc.GetElementSpaceSize());
    // resultRunningVariance_dev
    DeviceMem resultRunningVariance_dev(sizeof(AccDataType) *
                                        resultRunningVariance_ref.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    bnScale_dev.ToDevice(bnScale.mData.data());
    bnBias_dev.ToDevice(bnBias.mData.data());

    if(updateMovingAverage)
    {
        resultRunningMean_dev.ToDevice(resultRunningMean_ref.mData.data());
        resultRunningVariance_dev.ToDevice(resultRunningVariance_ref.mData.data());
    };

    std::array<index_t, Rank> i_inOutLengths;
    std::array<index_t, Rank> i_inOutStrides;
    std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarLengths;
    std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarStrides;

    std::copy(inOutLengths.begin(), inOutLengths.end(), i_inOutLengths.begin());
    std::copy(inOutStrides.begin(), inOutStrides.end(), i_inOutStrides.begin());
    std::copy(scaleBiasMeanVarLengths.begin(),
              scaleBiasMeanVarLengths.end(),
              i_scaleBiasMeanVarLengths.begin());
    std::copy(scaleBiasMeanVarStrides.begin(),
              scaleBiasMeanVarStrides.end(),
              i_scaleBiasMeanVarStrides.begin());

    int result = 0;

    // used for saving meansquare
    DeviceMem workspace(sizeof(AccDataType) * 2 * resultSaveMean_ref.mDesc.GetElementSpaceSize() +
                        128);

    void* p_tmp_mean = workspace.GetDeviceBuffer();
    void* p_tmp_meansquare =
        static_cast<char*>(p_tmp_mean) +
        (sizeof(AccDataType) * resultSaveMean_ref.mDesc.GetElementSpaceSize() + 63) / 64 * 64;

    result = bnorm_fwd<InOutDataType, AccDataType, Rank, NumReduceDim, false>(
        time_kernel,
        updateMovingAverage,
        saveMeanAndInvVariance,
        {0, 1, 2},
        i_inOutLengths,
        i_inOutStrides,
        i_inOutStrides,
        i_scaleBiasMeanVarLengths,
        i_scaleBiasMeanVarStrides,
        x_dev.GetDeviceBuffer(),
        bnScale_dev.GetDeviceBuffer(),
        bnBias_dev.GetDeviceBuffer(),
        y_dev.GetDeviceBuffer(),
        averageFactor,
        updateMovingAverage ? resultRunningMean_dev.GetDeviceBuffer() : nullptr,
        updateMovingAverage ? resultRunningVariance_dev.GetDeviceBuffer() : nullptr,
        epsilon,
        saveMeanAndInvVariance ? resultSaveMean_dev.GetDeviceBuffer() : nullptr,
        saveMeanAndInvVariance ? resultSaveInvVariance_dev.GetDeviceBuffer() : nullptr,
        p_tmp_mean,
        p_tmp_meansquare);

    if(result < 0)
        return (false);

    bool pass = true;

    if(do_verification)
    {
        auto batchNormFwd_ref = ReferenceBatchNormFwdInstance<InOutDataType, AccDataType>{};

        auto argument_ptr_ref = batchNormFwd_ref.MakeArgumentPointer(
            i_inOutLengths,
            i_inOutStrides,
            i_inOutStrides,
            i_scaleBiasMeanVarLengths,
            i_scaleBiasMeanVarStrides,
            x.mData.data(),
            bnScale.mData.data(),
            bnBias.mData.data(),
            y_ref.mData.data(),
            0.1, // exponentialAverageFactor
            updateMovingAverage ? resultRunningMean_ref.mData.data() : nullptr, // resultRunningMean
            updateMovingAverage ? resultRunningVariance_ref.mData.data()
                                : nullptr, // resultRunningVariance
            epsilon,
            saveMeanAndInvVariance ? resultSaveMean_ref.mData.data() : nullptr,
            saveMeanAndInvVariance ? resultSaveInvVariance_ref.mData.data() : nullptr);

        if(!batchNormFwd_ref.IsSupportedArgument(argument_ptr_ref.get()))
        {
            std::cout
                << "The runtime parameters seem not supported by the BatchNorm instance, exiting!"
                << std::endl;

            return (-2);
        };

        auto invoker_ptr_ref = batchNormFwd_ref.MakeInvokerPointer();

        (void)invoker_ptr_ref->Run(argument_ptr_ref.get());

        y_dev.FromDevice(y.mData.data());
        pass = pass && ck::utils::check_err(y.mData, y_ref.mData);

        if(updateMovingAverage)
        {
            Tensor<AccDataType> resultRunningMean(scaleBiasMeanVarLengths);
            Tensor<AccDataType> resultRunningVariance(scaleBiasMeanVarLengths);

            resultRunningMean_dev.FromDevice(resultRunningMean.mData.data());
            resultRunningVariance_dev.FromDevice(resultRunningVariance.mData.data());

            pass = pass &&
                   ck::utils::check_err(resultRunningMean.mData, resultRunningMean_ref.mData);
            pass = pass && ck::utils::check_err(resultRunningVariance.mData,
                                                resultRunningVariance_ref.mData);
        };

        if(saveMeanAndInvVariance)
        {
            Tensor<AccDataType> resultSaveMean(scaleBiasMeanVarLengths);
            Tensor<AccDataType> resultSaveInvVariance(scaleBiasMeanVarLengths);

            resultSaveMean_dev.FromDevice(resultSaveMean.mData.data());
            resultSaveInvVariance_dev.FromDevice(resultSaveInvVariance.mData.data());

            pass = pass && ck::utils::check_err(resultSaveMean.mData, resultSaveMean_ref.mData);
            pass = pass && ck::utils::check_err(resultSaveInvVariance.mData,
                                                resultSaveInvVariance_ref.mData);
        };
    };

    return (pass);
};

const double epsilon              = std::numeric_limits<float>::epsilon();
static const double averageFactor = 0.1;

int main(int argc, char* argv[])
{
    bool pass = true;

    if(argc > 1)
    {
        BatchNormFwdArg arg;

        if(arg.processArgs(argc, argv) < 0)
            return (-1);

        if(arg.data_type == 0)
        {
            pass = bnorm_fwd_nhwc_test<ck::half_t, float>(arg.do_verification,
                                                          arg.init_method,
                                                          arg.time_kernel,
                                                          arg.inOutLengths,
                                                          arg.updateMovingAverage,
                                                          arg.saveMeanAndInvVariance,
                                                          averageFactor,
                                                          epsilon);
        }
        else if(arg.data_type == 1)
        {
            pass = bnorm_fwd_nhwc_test<float, float>(arg.do_verification,
                                                     arg.init_method,
                                                     arg.time_kernel,
                                                     arg.inOutLengths,
                                                     arg.updateMovingAverage,
                                                     arg.saveMeanAndInvVariance,
                                                     averageFactor,
                                                     epsilon);
        }
        else if(arg.data_type == 3)
        {
            pass = bnorm_fwd_nhwc_test<int8_t, float>(arg.do_verification,
                                                      arg.init_method,
                                                      arg.time_kernel,
                                                      arg.inOutLengths,
                                                      arg.updateMovingAverage,
                                                      arg.saveMeanAndInvVariance,
                                                      averageFactor,
                                                      epsilon);
        }
        else if(arg.data_type == 5)
        {
            pass = bnorm_fwd_nhwc_test<ck::bhalf_t, float>(arg.do_verification,
                                                           arg.init_method,
                                                           arg.time_kernel,
                                                           arg.inOutLengths,
                                                           arg.updateMovingAverage,
                                                           arg.saveMeanAndInvVariance,
                                                           averageFactor,
                                                           epsilon);
        }
        else if(arg.data_type == 6)
        {
            pass = bnorm_fwd_nhwc_test<double, double>(arg.do_verification,
                                                       arg.init_method,
                                                       arg.time_kernel,
                                                       arg.inOutLengths,
                                                       arg.updateMovingAverage,
                                                       arg.saveMeanAndInvVariance,
                                                       averageFactor,
                                                       epsilon);
        }
    }
    else
    {
        pass = bnorm_fwd_nhwc_test<ck::half_t, float>(true,
                                                      2,
                                                      false, // don't time kernel
                                                      {128, 16, 16, 1024},
                                                      true,
                                                      false,
                                                      averageFactor,
                                                      epsilon);
    };

    return (pass ? 0 : 1);
}
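The example above carves one workspace allocation into two per-channel buffers (temporary mean and temporary meansquare), rounding the offset of the second buffer up to a 64-byte boundary; the extra 128 bytes in the allocation cover that padding. A small sketch of the same alignment arithmetic, with illustrative values:

```cpp
// Rounding a byte offset up to a 64-byte boundary, as done for p_tmp_meansquare above.
#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t elem_bytes = sizeof(float);
    const std::size_t channels   = 1000; // hypothetical per-channel buffer length

    const std::size_t raw_offset     = elem_bytes * channels;       // 4000
    const std::size_t aligned_offset = (raw_offset + 63) / 64 * 64; // 4032

    std::printf("raw=%zu aligned=%zu padding=%zu\n",
                raw_offset, aligned_offset, aligned_offset - raw_offset);
    return 0;
}
```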
example/34_batchnorm/batchnorm_infer_impl.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <cassert>
#include <vector>

#include "ck/ck.hpp"
#include "ck/utility/sequence.hpp"
#include "ck/utility/tuple.hpp"
#include "ck/utility/reduction_operator.hpp"
#include "ck/tensor_operation/gpu/device/device_elementwise.hpp"

#include "batchnorm_common.hpp"

template <typename InOutDataType,
          typename AccDataType,
          ck::index_t Rank,
          ck::index_t NumBatchNormReduceDim,
          bool fastest_dim_is_reduced = false>
int bnorm_infer(bool time_kernel,
                const std::array<int, NumBatchNormReduceDim> reduceDims,
                const std::array<ck::index_t, Rank> xyLengths,
                const std::array<ck::index_t, Rank> xStrides,
                const std::array<ck::index_t, Rank> yStrides,
                const std::array<ck::index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarLengths,
                const std::array<ck::index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarStrides,
                const void* p_x,
                const void* p_scale,
                const void* p_bias,
                double epsilon,
                const void* p_estimatedMean,
                const void* p_estimatedVariance,
                void* p_y)
{
    (void)bnScaleBiasMeanVarLengths;

    static_assert(NumBatchNormReduceDim < Rank,
                  "Invalid number of reduced dimensions for batchnorm!");

    using DeviceNormalizeInstance = ck::tensor_operation::device::DeviceElementwise<
        ck::Tuple<InOutDataType, AccDataType, AccDataType, AccDataType, AccDataType>, // x, mean,
                                                                                      // variance,
                                                                                      // scale,
                                                                                      // bias,
        ck::Tuple<InOutDataType>,                                                     // y
        NormalizeInInfer,
        Rank,
        2,                           // MPerthread
        ck::Sequence<1, 1, 1, 1, 1>, // x, mean, variance, scale, bias
        ck::Sequence<1>>;            // scalarPerVector: y

    auto invariantDims = get_invariant_dims<Rank, NumBatchNormReduceDim>(reduceDims);

    std::array<ck::index_t, Rank> aligned_scaleBiasMeanVarStrides{0};

    int i = 0;
    for(auto dim : invariantDims)
    {
        assert(xyLengths[dim] == bnScaleBiasMeanVarLengths[i]);

        aligned_scaleBiasMeanVarStrides[dim] = bnScaleBiasMeanVarStrides[i];
        i++;
    };

    int32_t reduceLength = 1;

    for(auto dim : reduceDims)
        reduceLength *= xyLengths[dim];

    int32_t invariantLength = 1;

    for(auto dim : invariantDims)
        invariantLength *= xyLengths[dim];

    size_t total_length = static_cast<size_t>(invariantLength) * reduceLength;

    float avg_time        = 0.0f;
    std::size_t num_bytes = 0;

    auto dev_normalize = DeviceNormalizeInstance{};

    auto argument_ptr1 =
        dev_normalize.MakeArgumentPointer(xyLengths,
                                          {xStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides,
                                           aligned_scaleBiasMeanVarStrides},
                                          {yStrides},
                                          {p_x, p_estimatedMean, p_estimatedVariance, p_scale, p_bias},
                                          {p_y},
                                          NormalizeInInfer{epsilon});

    if(!dev_normalize.IsSupportedArgument(argument_ptr1.get()))
    {
        std::cout << "The runtime parameters seem not supported by the Device, exiting!"
                  << std::endl;

        return (-1);
    };

    auto invoker_ptr1 = dev_normalize.MakeInvokerPointer();

    avg_time += invoker_ptr1->Run(argument_ptr1.get(), StreamConfig{nullptr, time_kernel});

    num_bytes += (total_length * (1 * sizeof(InOutDataType) + 4 * sizeof(AccDataType)) +
                  total_length * sizeof(InOutDataType));

    if(time_kernel)
    {
        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s" << std::endl;
    };

    return (0);
};
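In bnorm_infer, num_bytes counts one read of x, per-element reads of mean/variance/scale/bias, and one write of y; dividing by the measured kernel time in milliseconds and by 1e6 gives the GB/s figure printed above. A short sketch of that bandwidth estimate, using the tensor shape and kernel time from the README as illustrative inputs:

```cpp
// How the reported GB/s figure is derived from the byte count and kernel time (illustrative).
#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t total_length = 128UL * 16 * 16 * 1024; // N*H*W*C from the README example
    const std::size_t in_out_bytes = 2;                      // e.g. fp16 element size
    const std::size_t acc_bytes    = 4;                      // fp32 accumulator element size

    // one x read + mean/variance/scale/bias reads + one y write, as counted in bnorm_infer
    const std::size_t num_bytes =
        total_length * (1 * in_out_bytes + 4 * acc_bytes) + total_length * in_out_bytes;

    const float avg_time_ms = 1.28235f; // measured kernel time from the README run
    const float gb_per_sec  = num_bytes / 1.0e6f / avg_time_ms;

    std::printf("bytes=%zu  GB/s=%.1f\n", num_bytes, gb_per_sec); // ~523 GB/s
    return 0;
}
```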
example/34_batchnorm/batchnorm_infer_nhwc.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <limits>
#include <iostream>
#include <vector>
#include <array>
#include <algorithm>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/host_common_util.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_infer_nhwc_c.hpp"

#include "batchnorm_infer_impl.hpp"

template <typename InOutDataType, typename AccDataType>
using ReferenceBatchNormInferInstance =
    ck::tensor_operation::host::ReferenceBatchNormInfer_Input_N_H_W_C_Output_C<InOutDataType,
                                                                               AccDataType>;

static struct option long_options[] = {{"inOutLengths", required_argument, nullptr, 'D'},
                                       {"verify", required_argument, nullptr, 'v'},
                                       {"help", no_argument, nullptr, '?'},
                                       {nullptr, 0, nullptr, 0}};

class BatchNormInferArg
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inOutLengths;

    bool do_verification = false;

    int data_type    = 0;
    int init_method  = 2;
    bool time_kernel = false;

    public:
    void show_usage(const char* cmd)
    {
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inOutLengths or -D, comma separated list of input tensor dimension "
                     "lengths, must have 4 integers for nhwc"
                  << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the batch-normalization "
                     "result by comparing with the host-based batch-normalization"
                  << std::endl;
        std::cout << "Arg1: data type (0: fp16, 1: fp32, 3: int8, 5: bp16, 6: fp64)" << std::endl;
        std::cout << "Arg2: init method used for bnScale and bnBias (0=no init, 1=single integer "
                     "value, 2=scope integer value, 3=decimal value)"
                  << std::endl;
        std::cout << "Arg3: time kernel (0=no, 1=yes)" << std::endl;
    };

    int processArgs(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        while(1)
        {
            ch = getopt_long(argc, argv, "D:v:", long_options, &option_index);
            if(ch == -1)
                break;

            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inOutLengths = getTypeValuesFromString<size_t>(optarg);

                if(inOutLengths.size() != 4)
                    throw std::runtime_error(
                        "NHWC tensor layout should have 4 length values specified!");
                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return (-1);
                };
                break;
            default: show_usage(argv[0]); return (-1);
            };
        };

        if(optind + 3 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        data_type   = std::atoi(argv[optind++]);
        init_method = std::atoi(argv[optind++]);
        time_kernel = static_cast<bool>(std::atoi(argv[optind]));

        if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5 && data_type != 6)
            return (-1);

        return (0);
    };
};

using namespace ck;

template <typename InOutDataType, typename AccDataType>
bool bnorm_infer_nhwc_test(bool do_verification,
                           int init_method,
                           bool time_kernel,
                           const std::vector<size_t> inOutLengths,
                           double epsilon)
{
    // for NHWC BatchNorm calculation of mean and meansquare
    constexpr int Rank         = 4;
    constexpr int NumReduceDim = 3;

    const std::vector<size_t> scaleBiasMeanVarLengths = {inOutLengths[3]};

    // input data of the batchnorm forward algorithm
    Tensor<InOutDataType> x(inOutLengths);
    Tensor<AccDataType> bnScale(scaleBiasMeanVarLengths);
    Tensor<AccDataType> bnBias(scaleBiasMeanVarLengths);

    // output data of the batchnorm forward algorithm
    Tensor<InOutDataType> y_ref(inOutLengths);
    Tensor<InOutDataType> y(inOutLengths);

    Tensor<AccDataType> estimatedMean(scaleBiasMeanVarLengths);
    Tensor<AccDataType> estimatedVariance(scaleBiasMeanVarLengths);

    auto inOutStrides            = x.mDesc.GetStrides();
    auto scaleBiasMeanVarStrides = bnScale.mDesc.GetStrides();

    std::size_t num_thread = std::thread::hardware_concurrency();

    if constexpr(std::is_same<InOutDataType, int8_t>::value)
    {
        x.GenerateTensorValue(GeneratorTensor_2<InOutDataType>{-5, 5}, num_thread);

        const float x_mean       = 0.0f;
        const float x_stddev     = 2.5f;
        const float noise_stddev = 0.0001f;

        estimatedMean.GenerateTensorValue(GeneratorTensor_4<AccDataType>{x_mean, noise_stddev},
                                          num_thread);

        estimatedVariance.GenerateTensorValue(
            GeneratorTensor_4<AccDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
    }
    else
    {
        const float x_mean       = 0.0f;
        const float x_stddev     = 1.0f;
        const float noise_stddev = 0.0001f;

        x.GenerateTensorValue(GeneratorTensor_4<InOutDataType>{x_mean, x_stddev}, num_thread);

        // initialize the savedMean to be values with tiny variation to the mean of the x values
        estimatedMean.GenerateTensorValue(GeneratorTensor_4<AccDataType>{x_mean, noise_stddev},
                                          num_thread);

        // initialize the variance to be values with tiny variation to the variance of the x values
        estimatedVariance.GenerateTensorValue(
            GeneratorTensor_4<AccDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
    };

    if(do_verification)
    {
        switch(init_method)
        {
        case 0:
            bnScale.GenerateTensorValue(GeneratorTensor_0<AccDataType>{}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_0<AccDataType>{}, num_thread);
            break;
        case 1:
            bnScale.GenerateTensorValue(GeneratorTensor_1<AccDataType>{1}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_1<AccDataType>{0}, num_thread);
            break;
        case 2:
            bnScale.GenerateTensorValue(GeneratorTensor_2<AccDataType>{-5, 5}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_2<AccDataType>{-5, 5}, num_thread);
            break;
        default:
            bnScale.GenerateTensorValue(GeneratorTensor_3<AccDataType>{-5.0f, 5.0f}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_3<AccDataType>{-5.0f, 5.0f}, num_thread);
        }
    };

    // these buffers are usually provided by the user application
    DeviceMem x_dev(sizeof(InOutDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(InOutDataType) * y.mDesc.GetElementSpaceSize());
    DeviceMem bnScale_dev(sizeof(AccDataType) * bnScale.mDesc.GetElementSpaceSize());
    DeviceMem bnBias_dev(sizeof(AccDataType) * bnBias.mDesc.GetElementSpaceSize());

    // mean_dev or resultSaveMean_dev
    DeviceMem estimatedMean_dev(sizeof(AccDataType) * estimatedMean.mDesc.GetElementSpaceSize());
    // meansquare_dev or resultSaveInvVariance_dev
    DeviceMem estimatedVariance_dev(sizeof(AccDataType) *
                                    estimatedVariance.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    bnScale_dev.ToDevice(bnScale.mData.data());
    bnBias_dev.ToDevice(bnBias.mData.data());
    estimatedMean_dev.ToDevice(estimatedMean.mData.data());
    estimatedVariance_dev.ToDevice(estimatedVariance.mData.data());

    using ck::index_t;

    std::array<index_t, Rank> i_inOutLengths;
    std::array<index_t, Rank> i_inOutStrides;
    std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarLengths;
    std::array<index_t, Rank - NumReduceDim> i_scaleBiasMeanVarStrides;

    std::copy(inOutLengths.begin(), inOutLengths.end(), i_inOutLengths.begin());
    std::copy(inOutStrides.begin(), inOutStrides.end(), i_inOutStrides.begin());
    std::copy(scaleBiasMeanVarLengths.begin(),
              scaleBiasMeanVarLengths.end(),
              i_scaleBiasMeanVarLengths.begin());
    std::copy(scaleBiasMeanVarStrides.begin(),
              scaleBiasMeanVarStrides.end(),
              i_scaleBiasMeanVarStrides.begin());

    int result = 0;

    result = bnorm_infer<InOutDataType, AccDataType, Rank, NumReduceDim, false>(
        time_kernel,
        {0, 1, 2},
        i_inOutLengths,
        i_inOutStrides,
        i_inOutStrides,
        i_scaleBiasMeanVarLengths,
        i_scaleBiasMeanVarStrides,
        x_dev.GetDeviceBuffer(),
        bnScale_dev.GetDeviceBuffer(),
        bnBias_dev.GetDeviceBuffer(),
        epsilon,
        estimatedMean_dev.GetDeviceBuffer(),
        estimatedVariance_dev.GetDeviceBuffer(),
        y_dev.GetDeviceBuffer());

    if(result < 0)
        return (false);

    bool pass = true;

    if(do_verification)
    {
        auto batchNormInfer_ref = ReferenceBatchNormInferInstance<InOutDataType, AccDataType>{};

        auto argument_ptr_ref =
            batchNormInfer_ref.MakeArgumentPointer(i_inOutLengths,
                                                   i_inOutStrides,
                                                   i_inOutStrides,
                                                   i_scaleBiasMeanVarLengths,
                                                   i_scaleBiasMeanVarStrides,
                                                   x.mData.data(),
                                                   bnScale.mData.data(),
                                                   bnBias.mData.data(),
                                                   epsilon,
                                                   estimatedMean.mData.data(),
                                                   estimatedVariance.mData.data(),
                                                   y_ref.mData.data());

        if(!batchNormInfer_ref.IsSupportedArgument(argument_ptr_ref.get()))
        {
            std::cout
                << "The runtime parameters seem not supported by the BatchNorm instance, exiting!"
                << std::endl;

            return (-2);
        };

        auto invoker_ptr_ref = batchNormInfer_ref.MakeInvokerPointer();

        (void)invoker_ptr_ref->Run(argument_ptr_ref.get());

        y_dev.FromDevice(y.mData.data());
        pass = pass && ck::utils::check_err(y.mData, y_ref.mData);
    };

    return (pass);
};

static const double epsilon = std::numeric_limits<float>::epsilon();

int main(int argc, char* argv[])
{
    bool pass = true;

    if(argc > 1)
    {
        BatchNormInferArg arg;

        if(arg.processArgs(argc, argv) < 0)
            return (-1);

        if(arg.data_type == 0)
        {
            pass = bnorm_infer_nhwc_test<ck::half_t, float>(
                arg.do_verification, arg.init_method, arg.time_kernel, arg.inOutLengths, epsilon);
        }
        else if(arg.data_type == 1)
        {
            pass = bnorm_infer_nhwc_test<float, float>(
                arg.do_verification, arg.init_method, arg.time_kernel, arg.inOutLengths, epsilon);
        }
        else if(arg.data_type == 3)
        {
            pass = bnorm_infer_nhwc_test<int8_t, float>(
                arg.do_verification, arg.init_method, arg.time_kernel, arg.inOutLengths, epsilon);
        }
        else if(arg.data_type == 5)
        {
            pass = bnorm_infer_nhwc_test<ck::bhalf_t, float>(
                arg.do_verification, arg.init_method, arg.time_kernel, arg.inOutLengths, epsilon);
        }
        else if(arg.data_type == 6)
        {
            pass = bnorm_infer_nhwc_test<double, double>(
                arg.do_verification, arg.init_method, arg.time_kernel, arg.inOutLengths, epsilon);
        };
    }
    else
    {
        pass = bnorm_infer_nhwc_test<ck::half_t, float>(true,
                                                        2,
                                                        false, // don't time kernel
                                                        {128, 16, 16, 1024},
                                                        epsilon);
    };

    return (pass ? 0 : 1);
}
example/31_splitK_gemm/CMakeLists.txt → example/35_splitK_gemm/CMakeLists.txt (file moved)
example/31_splitK_gemm/splitK_gemm_xdl_bfp16.cpp → example/35_splitK_gemm/splitK_gemm_xdl_bfp16.cpp (file moved)
example/31_splitK_gemm/splitK_gemm_xdl_fp16.cpp → example/35_splitK_gemm/splitK_gemm_xdl_fp16.cpp (file moved)
example/31_splitK_gemm/splitK_gemm_xdl_fp32.cpp → example/35_splitK_gemm/splitK_gemm_xdl_fp32.cpp (file moved)
example/31_splitK_gemm/splitK_gemm_xdl_int8.cpp → example/35_splitK_gemm/splitK_gemm_xdl_int8.cpp (file moved)
example/CMakeLists.txt

@@ -30,7 +30,7 @@ add_subdirectory(12_reduce)
 add_subdirectory(13_pool2d_fwd)
 add_subdirectory(14_gemm_xdl_requant_relu_requant)
 add_subdirectory(15_grouped_gemm)
-add_subdirectory(16_gemm_reduce)
+add_subdirectory(16_gemm_multi_d_multi_reduces)
 add_subdirectory(17_convnd_bwd_data)
 add_subdirectory(18_batched_gemm_reduce)
 add_subdirectory(19_binary_elementwise)
@@ -42,6 +42,11 @@ add_subdirectory(24_batched_gemm)
 add_subdirectory(25_gemm_bias_e_permute)
 add_subdirectory(26_contraction)
 add_subdirectory(27_layernorm)
-add_subdirectory(28_grouped_gemm_bias)
-add_subdirectory(30_grouped_convnd_fwd_bias_relu)
-add_subdirectory(31_splitK_gemm)
+add_subdirectory(28_grouped_gemm_bias_e_permute)
+add_subdirectory(29_batched_gemm_bias_e_permute)
+add_subdirectory(30_grouped_convnd_fwd_bias_relu_add)
+add_subdirectory(31_batched_gemm_gemm)
+add_subdirectory(32_batched_gemm_scale_softmax_gemm)
+add_subdirectory(33_multiple_reduce)
+add_subdirectory(34_batchnorm)
+add_subdirectory(35_splitK_gemm)
include/ck/tensor_description/tensor_descriptor.hpp

@@ -4,6 +4,7 @@
 #pragma once

 #include "ck/utility/common_header.hpp"
 #include "ck/utility/sequence_helper.hpp"
+#include "ck/tensor_description/multi_index_transform.hpp"

 namespace ck {
@@ -159,6 +160,12 @@ struct TensorDescriptor
         return transforms_[Number<itran>{}].GetUpperLengths()[Number<idim_up>{}];
     }

+    __host__ __device__ constexpr auto GetLengths() const
+    {
+        // FIXME: use Tuple of reference instead
+        return generate_sequence_v2([&](auto I) { return GetLength(I); }, Number<ndim_visible_>{});
+    }
+
     __host__ __device__ constexpr auto GetElementSize() const { return element_size_; }

     __host__ __device__ constexpr auto GetElementSpaceSize() const { return element_space_size_; }
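The added GetLengths() builds a per-dimension lengths sequence by invoking GetLength(I) at every compile-time index up to the number of visible dimensions. A conceptual analogue in standard C++ (this is an illustration using std::index_sequence, not the ck generate_sequence_v2 API; all names below are hypothetical):

```cpp
// Building a per-dimension lengths array by applying a callable at each compile-time index.
#include <array>
#include <cstddef>
#include <cstdio>
#include <utility>

constexpr std::array<std::size_t, 4> dims{128, 16, 16, 1024}; // stand-in for GetLength(i)

template <std::size_t NDim, typename F, std::size_t... Is>
std::array<std::size_t, NDim> make_lengths_impl(F f, std::index_sequence<Is...>)
{
    return {f(Is)...};
}

template <std::size_t NDim, typename F>
std::array<std::size_t, NDim> make_lengths(F f)
{
    return make_lengths_impl<NDim>(f, std::make_index_sequence<NDim>{});
}

int main()
{
    const auto lengths = make_lengths<4>([](std::size_t i) { return dims[i]; });
    for(auto l : lengths)
        std::printf("%zu ", l);
    std::printf("\n");
    return 0;
}
```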
include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops.hpp

@@ -25,6 +25,22 @@ constexpr LoopScheduler make_default_loop_scheduler()
 #endif // if CK_EXPERIMENTAL_INTER_WAVE_SCHEDULING
 }

template <index_t MNXdlPerWave, index_t MNWaves, index_t MNPerXdl, typename TileDesc_K0_MN_K1>
__host__ __device__ static constexpr auto
MakeGemmMmaTileDescriptor_MN0_MN1_MN2_K(const TileDesc_K0_MN_K1&)
{
    constexpr index_t K0 = TileDesc_K0_MN_K1{}.GetLength(Number<0>{});
    constexpr index_t K1 = TileDesc_K0_MN_K1{}.GetLength(Number<2>{});

    return transform_tensor_descriptor(
        TileDesc_K0_MN_K1{},
        make_tuple(make_merge_transform_v3_division_mod(make_tuple(Number<K0>{}, Number<K1>{})),
                   make_unmerge_transform(
                       make_tuple(Number<MNXdlPerWave>{}, Number<MNWaves>{}, Number<MNPerXdl>{}))),
        make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
        make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
}

 template <index_t BlockSize,
           typename FloatAB,
           typename FloatAcc,

@@ -585,4 +601,359 @@ constexpr auto BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector()
 }
 };

// Blockwise gemm supporting
// 1. regular XDL output M2_M3_M4_M2 and transposed XDL output M2_N2_N3_N4
// 2. decoupled input tile descriptor and mma tile descriptor in order to support both vgpr and LDS
// source buffer
// 3. configurable k index starting position and step size after each FMA/XDL instruction
template <index_t BlockSize,
          typename FloatAB,
          typename FloatAcc,
          typename ATileDesc,
          typename BTileDesc,
          typename AMmaTileDesc,
          typename BMmaTileDesc,
          index_t MPerBlock,
          index_t NPerBlock,
          index_t KPerBlock,
          index_t MPerXDL,
          index_t NPerXDL,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPack,
          bool TransposeC = false,
          index_t AMmaKStride =
              KPack * XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, TransposeC>{}.K0PerXdlops,
          index_t BMmaKStride =
              KPack * XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, TransposeC>{}.K0PerXdlops>
struct BlockwiseGemmXdlops_v2
{
    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
    static constexpr auto I3 = Number<3>{};

    using ThisThreadBlock = ThisThreadBlock<BlockSize>;

    static constexpr index_t WaveSize = get_warp_size();

    static constexpr index_t A_K0 = ATileDesc{}.GetLength(I0);
    static constexpr index_t B_K0 = BTileDesc{}.GetLength(I0);
    static constexpr index_t A_K1 = ATileDesc{}.GetLength(I2);
    static constexpr index_t B_K1 = BTileDesc{}.GetLength(I2);

    static constexpr auto xdlops_gemm = XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack, TransposeC>{};

    static constexpr index_t KPerThread = KPerBlock / xdlops_gemm.K0PerXdlops;

    static constexpr index_t MWaves = MPerBlock / (MRepeat * MPerXDL);
    static constexpr index_t NWaves = NPerBlock / (NRepeat * NPerXDL);

    StaticBufferTupleOfVector<AddressSpaceEnum::Vgpr,
                              FloatAcc,
                              MRepeat * NRepeat,
                              xdlops_gemm.GetRegSizePerXdlops(),
                              true>
        c_thread_buf_;

    __host__ __device__ constexpr auto& GetCThreadBuffer() { return c_thread_buf_; }

    __device__ static auto GetWaveIdx()
    {
        const index_t thread_id = ThisThreadBlock::GetThreadId();

        constexpr auto threadid_to_wave_idx_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(MWaves, NWaves, WaveSize))),
            make_tuple(Sequence<0, 1, 2>{}),
            make_tuple(Sequence<0>{}));

        return threadid_to_wave_idx_adaptor.CalculateBottomIndex(make_multi_index(thread_id));
    }

    __device__ static auto CalculateAThreadOriginDataIndex()
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_m = wave_idx[I0];

        const auto xdlops_a_idx = xdlops_gemm.CalculateAThreadOriginDataIndex();

        return make_tuple(0, waveId_m, xdlops_a_idx[I1], KPack * xdlops_a_idx[I0]);
    }

    __device__ static auto CalculateBThreadOriginDataIndex()
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_n = wave_idx[I1];

        const auto xdlops_b_idx = xdlops_gemm.CalculateBThreadOriginDataIndex();

        return make_tuple(0, waveId_n, xdlops_b_idx[I1], KPack * xdlops_b_idx[I0]);
    }

    template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
    __device__ static auto
    CalculateCThreadOriginDataIndex(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_m = wave_idx[I0];
        const auto waveId_n = wave_idx[I1];

        const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk(xdlops_i, blk_i);

        constexpr auto mrepeat_mwave_mperxdl_to_m_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(MRepeat, MWaves, MPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        constexpr auto nrepeat_nwave_nperxdl_to_n_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(NRepeat, NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        const index_t c_thread_m = mrepeat_mwave_mperxdl_to_m_adaptor.CalculateBottomIndex(
            make_tuple(m0, waveId_m, blk_idx[I0]))[I0];
        const index_t c_thread_n = nrepeat_nwave_nperxdl_to_n_adaptor.CalculateBottomIndex(
            make_tuple(n0, waveId_n, blk_idx[I1]))[I0];

        return make_tuple(c_thread_m, c_thread_n);
    }

    using Tuple4 = decltype(CalculateAThreadOriginDataIndex());

    __host__ __device__ BlockwiseGemmXdlops_v2(Tuple4 a_origin = CalculateAThreadOriginDataIndex(),
                                               Tuple4 b_origin = CalculateBThreadOriginDataIndex())
        : a_thread_copy_(a_origin), b_thread_copy_(b_origin)
    {
        static_assert(AMmaTileDesc::IsKnownAtCompileTime() && BMmaTileDesc::IsKnownAtCompileTime(),
                      "wrong! Desc should be known at compile-time");

        static_assert(ThisThreadBlock::GetNumOfThread() == MWaves * NWaves * WaveSize,
                      "ThisThreadBlock::GetNumOfThread() != MWaves * NWaves * WaveSize\n");

        static_assert(MPerBlock % (MPerXDL * MRepeat) == 0 && NPerBlock % (NPerXDL * NRepeat) == 0,
                      "wrong!");
    }

    __host__ __device__ BlockwiseGemmXdlops_v2(const BlockwiseGemmXdlops_v2& other)
        : a_thread_copy_(other.a_origin), b_thread_copy_(other.b_origin)
    {
    }

    // transposed XDL output supporting C_xdl' = B_xdl' * A_xdl'
    __host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_N2_N3_N4()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, N, M0, M1, M2));
    }

    // XDL output supporting C_xdl = A_xdl * B_xdl
    __host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    __host__ __device__ static constexpr auto GetCThreadDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(I1, Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    // transposed XDL output supporting C_xdl' = B_xdl' * A_xdl'
    __host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_N2_N3_N4()
    {
        constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_N2_N3_N4(c_block_desc_m0_n0_m1_n1_m2_n2);
    }

    // XDL output supporting C_xdl = A_xdl * B_xdl
    __host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2
(
c_block_desc_m0_n0_m1_n1_m2_n2
);
}
__host__
__device__
static
constexpr
auto
GetCBlockDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2
()
{
constexpr
auto
c_block_desc_g_m0_n0_m1_n1_m2_n2
=
make_naive_tensor_descriptor_packed
(
make_tuple
(
I1
,
Number
<
MRepeat
>
{},
Number
<
NRepeat
>
{},
Number
<
MWaves
>
{},
Number
<
NWaves
>
{},
Number
<
MPerXDL
>
{},
Number
<
NPerXDL
>
{}));
return
xdlops_gemm
.
MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2
(
c_block_desc_g_m0_n0_m1_n1_m2_n2
);
}
template
<
typename
CGridDesc_M_N
>
__host__
__device__
static
constexpr
auto
MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2
(
const
CGridDesc_M_N
&
c_grid_desc_m_n
)
{
const
auto
M
=
c_grid_desc_m_n
.
GetLength
(
I0
);
const
auto
N
=
c_grid_desc_m_n
.
GetLength
(
I1
);
const
auto
c_grid_desc_m0_n0_m1_n1_m2_n2
=
transform_tensor_descriptor
(
c_grid_desc_m_n
,
make_tuple
(
make_unmerge_transform
(
make_tuple
(
M
/
(
MWaves
*
MPerXDL
),
MWaves
,
MPerXDL
)),
make_unmerge_transform
(
make_tuple
(
N
/
(
NWaves
*
NPerXDL
),
NWaves
,
NPerXDL
))),
make_tuple
(
Sequence
<
0
>
{},
Sequence
<
1
>
{}),
make_tuple
(
Sequence
<
0
,
2
,
4
>
{},
Sequence
<
1
,
3
,
5
>
{}));
return
xdlops_gemm
.
MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2
(
c_grid_desc_m0_n0_m1_n1_m2_n2
);
}
template
<
typename
CGridDesc_G_M_N
>
__host__
__device__
static
constexpr
auto
MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2
(
const
CGridDesc_G_M_N
&
c_grid_desc_g_m_n
)
{
const
auto
G
=
c_grid_desc_g_m_n
.
GetLength
(
I0
);
const
auto
M
=
c_grid_desc_g_m_n
.
GetLength
(
I1
);
const
auto
N
=
c_grid_desc_g_m_n
.
GetLength
(
I2
);
const
auto
c_grid_desc_g_m0_n0_m1_n1_m2_n2
=
transform_tensor_descriptor
(
c_grid_desc_g_m_n
,
make_tuple
(
make_pass_through_transform
(
G
),
make_unmerge_transform
(
make_tuple
(
M
/
(
MWaves
*
MPerXDL
),
MWaves
,
MPerXDL
)),
make_unmerge_transform
(
make_tuple
(
N
/
(
NWaves
*
NPerXDL
),
NWaves
,
NPerXDL
))),
make_tuple
(
Sequence
<
0
>
{},
Sequence
<
1
>
{},
Sequence
<
2
>
{}),
make_tuple
(
Sequence
<
0
>
{},
Sequence
<
1
,
3
,
5
>
{},
Sequence
<
2
,
4
,
6
>
{}));
return
xdlops_gemm
.
MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2
(
c_grid_desc_g_m0_n0_m1_n1_m2_n2
);
}
static
constexpr
AMmaTileDesc
a_block_desc_m0_m1_m2_k
;
static
constexpr
BMmaTileDesc
b_block_desc_n0_n1_n2_k
;
template
<
typename
ABlockBuffer
,
typename
BBlockBuffer
,
typename
CThreadBuffer
>
__device__
void
Run
(
const
ABlockBuffer
&
a_block_buf
,
const
BBlockBuffer
&
b_block_buf
,
CThreadBuffer
&
c_thread_buf
)
const
{
auto
a_thread_buf
=
make_static_buffer
<
AddressSpaceEnum
::
Vgpr
,
FloatAB
>
(
a_thread_desc_
.
GetElementSpaceSize
());
auto
b_thread_buf
=
make_static_buffer
<
AddressSpaceEnum
::
Vgpr
,
FloatAB
>
(
b_thread_desc_
.
GetElementSpaceSize
());
static_for
<
0
,
KPerThread
/
KPack
,
1
>
{}([
&
](
auto
k
)
{
// k=0,1,2 instead of k=0,kpack*1, ...
static_for
<
0
,
MRepeat
,
1
>
{}([
&
](
auto
m0
)
{
// read A
a_thread_copy_
.
Run
(
a_block_desc_m0_m1_m2_k
,
make_tuple
(
m0
,
I0
,
I0
,
Number
<
k
*
AMmaKStride
>
{}),
a_block_buf
,
a_thread_desc_
,
make_tuple
(
I0
,
I0
,
I0
,
I0
),
a_thread_buf
);
static_for
<
0
,
NRepeat
,
1
>
{}([
&
](
auto
n0
)
{
// read B
b_thread_copy_
.
Run
(
b_block_desc_n0_n1_n2_k
,
make_tuple
(
n0
,
I0
,
I0
,
Number
<
k
*
BMmaKStride
>
{}),
b_block_buf
,
b_thread_desc_
,
make_tuple
(
I0
,
I0
,
I0
,
I0
),
b_thread_buf
);
vector_type
<
FloatAB
,
KPack
>
a_thread_vec
;
vector_type
<
FloatAB
,
KPack
>
b_thread_vec
;
static_for
<
0
,
KPack
,
1
>
{}([
&
](
auto
i
)
{
a_thread_vec
.
template
AsType
<
FloatAB
>()(
i
)
=
a_thread_buf
[
Number
<
a_thread_desc_
.
CalculateOffset
(
make_tuple
(
0
,
0
,
0
,
i
))
>
{}];
b_thread_vec
.
template
AsType
<
FloatAB
>()(
i
)
=
b_thread_buf
[
Number
<
b_thread_desc_
.
CalculateOffset
(
make_tuple
(
0
,
0
,
0
,
i
))
>
{}];
});
using
mfma_input_type
=
typename
vector_type
<
FloatAB
,
xdlops_gemm
.
K1PerXdlops
>::
type
;
constexpr
index_t
c_offset
=
c_thread_desc_
.
CalculateOffset
(
make_tuple
(
m0
,
n0
,
0
));
xdlops_gemm
.
template
Run
(
a_thread_vec
.
template
AsType
<
mfma_input_type
>(),
b_thread_vec
.
template
AsType
<
mfma_input_type
>(),
c_thread_buf
.
GetVectorTypeReference
(
Number
<
c_offset
>{}));
});
});
});
}
protected:
// A[M0, M1, M2, KPack]
static
constexpr
auto
a_thread_desc_
=
make_naive_tensor_descriptor_packed
(
make_tuple
(
I1
,
I1
,
I1
,
Number
<
KPack
>
{}));
// B[N0, N1, N2, KPack]
static
constexpr
auto
b_thread_desc_
=
make_naive_tensor_descriptor_packed
(
make_tuple
(
I1
,
I1
,
I1
,
Number
<
KPack
>
{}));
// C[M, N, NumRegXdlops]
static
constexpr
auto
c_thread_desc_
=
make_naive_tensor_descriptor_packed
(
make_tuple
(
Number
<
MRepeat
>
{},
Number
<
NRepeat
>
{},
xdlops_gemm
.
GetRegSizePerXdlops
()));
using
AThreadCopy
=
ThreadwiseTensorSliceTransfer_v4
<
FloatAB
,
FloatAB
,
decltype
(
a_block_desc_m0_m1_m2_k
),
decltype
(
a_thread_desc_
),
Sequence
<
1
,
1
,
1
,
KPack
>
,
Sequence
<
0
,
1
,
2
,
3
>
,
3
,
A_K1
,
A_K1
>
;
using
BThreadCopy
=
ThreadwiseTensorSliceTransfer_v4
<
FloatAB
,
FloatAB
,
decltype
(
b_block_desc_n0_n1_n2_k
),
decltype
(
b_thread_desc_
),
Sequence
<
1
,
1
,
1
,
KPack
>
,
Sequence
<
0
,
1
,
2
,
3
>
,
3
,
B_K1
,
B_K1
>
;
AThreadCopy
a_thread_copy_
;
BThreadCopy
b_thread_copy_
;
};
}
// namespace ck
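For orientation, `BlockwiseGemmXdlops_v2::Run` above walks the K dimension in `KPerThread / KPack` chunks and, for every `(m0, n0)` repeat pair, hands one `KPack`-wide A fragment and one B fragment to an XDLOPS MFMA that accumulates into the per-thread C buffer. Below is a minimal host-side sketch of that traversal order only, with plain scalar loops standing in for the MFMA and made-up tile sizes that are not taken from this header:

```cpp
#include <array>
#include <cstdio>

// Hypothetical tile sizes, chosen only for illustration.
constexpr int MRepeat = 2, NRepeat = 2, KChunks = 4, KPack = 4;

int main()
{
    std::array<float, MRepeat * KChunks * KPack> a{}; // per-thread A fragments
    std::array<float, NRepeat * KChunks * KPack> b{}; // per-thread B fragments
    std::array<float, MRepeat * NRepeat> c{};         // per-thread C accumulators

    for(int i = 0; i < static_cast<int>(a.size()); ++i) a[i] = 0.01f * i;
    for(int i = 0; i < static_cast<int>(b.size()); ++i) b[i] = 0.02f * i;

    // Same traversal order as the Run() loop: k chunk -> m0 -> n0, with the
    // innermost KPack loop standing in for a single MFMA issue.
    for(int k = 0; k < KChunks; ++k)
        for(int m0 = 0; m0 < MRepeat; ++m0)
            for(int n0 = 0; n0 < NRepeat; ++n0)
                for(int i = 0; i < KPack; ++i)
                    c[m0 * NRepeat + n0] += a[(m0 * KChunks + k) * KPack + i] *
                                            b[(n0 * KChunks + k) * KPack + i];

    std::printf("c[0][0] = %f\n", c[0]);
    return 0;
}
```

The point of this ordering is data reuse: each A fragment loaded from LDS is reused across all `NRepeat` MFMA issues before the next fragment is read.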
include/ck/tensor_operation/gpu/block/blockwise_gemm_xdlops_skip_b_lds.hpp
0 → 100644
View file @ 1dbdab56
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/common_header.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer.hpp"
#include "ck/tensor_operation/gpu/warp/xdlops_gemm.hpp"
#include "ck/tensor_description/tensor_adaptor.hpp"

namespace ck {

template <index_t BlockSize,
          typename FloatAB,
          typename FloatAcc,
          typename AK0MK1BlockDesc,
          typename BK0K0BN0N1N2N3K1BlockDesc,
          index_t MPerBlock,
          index_t NPerBlock,
          index_t K0PerBlock,
          index_t MPerXDL,
          index_t NPerXDL,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPack>
struct BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1r1
{
    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
    static constexpr auto I3 = Number<3>{};

    static constexpr index_t WaveSize = 64;

    static constexpr index_t KPerBlock = K0PerBlock * KPack;

    static constexpr index_t A_K0 = AK0MK1BlockDesc{}.GetLength(I0);
    static constexpr index_t A_K1 = AK0MK1BlockDesc{}.GetLength(I2);

    static constexpr auto xdlops_gemm = XdlopsGemm<FloatAB, MPerXDL, NPerXDL, KPack>{};

    static constexpr index_t KPerThread  = KPerBlock / xdlops_gemm.K0PerXdlops;
    static constexpr index_t K0PerThread = K0PerBlock / xdlops_gemm.K0PerXdlops;

    static constexpr index_t MWaves = MPerBlock / (MRepeat * MPerXDL);
    static constexpr index_t NWaves = NPerBlock / (NRepeat * NPerXDL);

    StaticBufferTupleOfVector<AddressSpaceEnum::Vgpr,
                              FloatAcc,
                              MRepeat * NRepeat,
                              xdlops_gemm.GetRegSizePerXdlops(),
                              true>
        c_thread_buf_;

    __host__ __device__ constexpr auto& GetCThreadBuffer() { return c_thread_buf_; }

    __device__ static auto GetWaveIdx()
    {
        const index_t thread_id = get_thread_local_1d_id();

        constexpr auto threadid_to_wave_idx_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(MWaves, NWaves, WaveSize))),
            make_tuple(Sequence<0, 1, 2>{}),
            make_tuple(Sequence<0>{}));

        return threadid_to_wave_idx_adaptor.CalculateBottomIndex(make_multi_index(thread_id));
    }

    __device__ static auto CalculateAThreadOriginDataIndex()
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_m = wave_idx[I0];

        const auto xdlops_a_idx = xdlops_gemm.CalculateAThreadOriginDataIndex();

        return make_tuple(0, waveId_m, xdlops_a_idx[I1], KPerThread * xdlops_a_idx[I0]);
    }

    __device__ static auto CalculateBThreadOriginDataIndex()
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_n = wave_idx[I1];

        const auto xdlops_b_idx = xdlops_gemm.CalculateBThreadOriginDataIndex();

        return make_tuple(0, waveId_n, xdlops_b_idx[I1], KPerThread * xdlops_b_idx[I0]);
    }

    template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
    __device__ static auto
        CalculateCThreadOriginDataIndex(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
    {
        const auto wave_idx = GetWaveIdx();

        const auto waveId_m = wave_idx[I0];
        const auto waveId_n = wave_idx[I1];

        const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk(xdlops_i, blk_i);

        constexpr auto mrepeat_mwave_mperxdl_to_m_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(MRepeat, MWaves, MPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        constexpr auto nrepeat_nwave_nperxdl_to_n_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(NRepeat, NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        const index_t c_thread_m = mrepeat_mwave_mperxdl_to_m_adaptor.CalculateBottomIndex(
            make_tuple(m0, waveId_m, blk_idx[I0]))[I0];
        const index_t c_thread_n = nrepeat_nwave_nperxdl_to_n_adaptor.CalculateBottomIndex(
            make_tuple(n0, waveId_n, blk_idx[I1]))[I0];

        return make_tuple(c_thread_m, c_thread_n);
    }

    __host__ __device__ BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1r1()
    {
        static_assert(AK0MK1BlockDesc::IsKnownAtCompileTime() &&
                          BK0K0BN0N1N2N3K1BlockDesc::IsKnownAtCompileTime(),
                      "wrong! Desc should be known at compile-time");

        static_assert(BlockSize == MWaves * NWaves * WaveSize,
                      "BlockSize != MWaves * NWaves * WaveSize\n");

        static_assert(MPerBlock % (MPerXDL * MRepeat) == 0 && NPerBlock % (NPerXDL * NRepeat) == 0,
                      "wrong!");
    }

    __host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    __host__ __device__ static constexpr auto GetCThreadDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(I1, Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_block_desc_m0_n0_m1_n1_m2_n2);
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_block_desc_g_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(I1,
                                                           Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
            c_block_desc_g_m0_n0_m1_n1_m2_n2);
    }

    template <typename CGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_M_N& c_grid_desc_m_n)
    {
        const auto M = c_grid_desc_m_n.GetLength(I0);
        const auto N = c_grid_desc_m_n.GetLength(I1);

        const auto c_grid_desc_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_m_n,
            make_tuple(make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0, 2, 4>{}, Sequence<1, 3, 5>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m0_n0_m1_n1_m2_n2);
    }

    template <typename CGridDesc_G_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_G_M_N& c_grid_desc_g_m_n)
    {
        const auto G = c_grid_desc_g_m_n.GetLength(I0);
        const auto M = c_grid_desc_g_m_n.GetLength(I1);
        const auto N = c_grid_desc_g_m_n.GetLength(I2);

        const auto c_grid_desc_g_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_g_m_n,
            make_tuple(make_pass_through_transform(G),
                       make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}),
            make_tuple(Sequence<0>{}, Sequence<1, 3, 5>{}, Sequence<2, 4, 6>{}));

        return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
            c_grid_desc_g_m0_n0_m1_n1_m2_n2);
    }

    __host__ __device__ static constexpr auto MakeABlockDescriptor_M0_M1_M2_K()
    {
        return transform_tensor_descriptor(
            AK0MK1BlockDesc{},
            make_tuple(
                make_merge_transform_v3_division_mod(make_tuple(Number<A_K0>{}, Number<A_K1>{})),
                make_unmerge_transform(
                    make_tuple(Number<MRepeat>{}, Number<MWaves>{}, Number<MPerXDL>{}))),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
            make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
    }

    __device__ void MoveABlockSliceWindow()
    {
        a_thread_copy_.MoveSrcSliceWindow(a_block_desc_m0_m1_m2_k,
                                          make_multi_index(0, 0, 0, K0PerBlock * KPack));
    }

    __device__ void ResetABlockStartWindow()
    {
        a_thread_copy_.SetSrcCoord(CalculateAThreadOriginDataIndex());
    }

    static constexpr auto a_block_desc_m0_m1_m2_k = MakeABlockDescriptor_M0_M1_M2_K();

    template <typename ABlockBuffer, typename BBlockBuffer, typename CThreadBuffer>
    __device__ void Run(const ABlockBuffer& a_block_buf,
                        const BBlockBuffer& b_thread_buf,
                        CThreadBuffer& c_thread_buf) const
    {
        auto a_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, FloatAB>(
            a_thread_desc_.GetElementSpaceSize());

        static_for<0, MRepeat, 1>{}([&](auto m0) {
            // read A
            a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                               make_tuple(m0, I0, I0, I0),
                               a_block_buf,
                               a_thread_desc_,
                               make_tuple(I0, I0, I0, I0),
                               a_thread_buf);

            static_for<0, NRepeat, 1>{}([&](auto n0) {
                // read B
                static_for<0, KPerThread, KPack>{}([&](auto k) {
                    vector_type<FloatAB, KPack> a_thread_vec;
                    vector_type<FloatAB, KPack> b_thread_vec;

                    constexpr index_t k0 = k / KPack;

                    static_for<0, KPack, 1>{}([&](auto i) {
                        a_thread_vec.template AsType<FloatAB>()(i) = a_thread_buf
                            [Number<a_thread_desc_.CalculateOffset(make_tuple(0, 0, 0, k + i))>{}];
                        b_thread_vec.template AsType<FloatAB>()(i) = b_thread_buf
                            [Number<b_thread_desc_.CalculateOffset(make_tuple(k0, n0, i))>{}];
                    });

                    using mfma_input_type =
                        typename vector_type<FloatAB, xdlops_gemm.K1PerXdlops>::type;

                    constexpr index_t c_offset =
                        c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                    xdlops_gemm.template Run(
                        a_thread_vec.template AsType<mfma_input_type>(),
                        b_thread_vec.template AsType<mfma_input_type>(),
                        c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                });
            });
        });
    }

    private:
    // A[M0, M1, M2, KPerThread]
    static constexpr auto a_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(I1, I1, I1, Number<KPerThread>{}));

    // B[N0, N1, N2, KPerThread]
    static constexpr auto b_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(Number<K0PerThread>{}, // KPerThread
                                                       Number<NRepeat>{},     // repeat
                                                       Number<KPack>{}));

    // C[M, N, NumRegXdlops]
    static constexpr auto c_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, xdlops_gemm.GetRegSizePerXdlops()));

    using AThreadCopy = ThreadwiseTensorSliceTransfer_v4<FloatAB,
                                                         FloatAB,
                                                         decltype(a_block_desc_m0_m1_m2_k),
                                                         decltype(a_thread_desc_),
                                                         Sequence<1, 1, 1, KPerThread>,
                                                         Sequence<0, 1, 2, 3>,
                                                         3,
                                                         A_K1,
                                                         A_K1>;

    AThreadCopy a_thread_copy_{CalculateAThreadOriginDataIndex()};
};

} // namespace ck
include/ck/tensor_operation/gpu/block/blockwise_softmax.hpp
0 → 100644
View file @ 1dbdab56
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/data_type.hpp"
#include "ck/utility/reduction_common.hpp"
#include "ck/utility/reduction_operator.hpp"
#include "ck/utility/reduction_functions_accumulate.hpp"
#include "ck/tensor_operation/gpu/block/reduction_functions_blockwise.hpp"
#include "ck/tensor_operation/gpu/thread/reduction_functions_threadwise.hpp"

namespace ck {

template <index_t BlockSize,
          typename AccDataType,
          typename ThreadMap_M_K, // thread_id to m_k
          typename ThreadClusterDesc_M_K,
          typename ThreadSliceDesc_M_K>
struct BlockwiseSoftmax
{
    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};

    static constexpr index_t MRepeat = ThreadSliceDesc_M_K{}.GetLength(I0);
    static constexpr index_t KRepeat = ThreadSliceDesc_M_K{}.GetLength(I1);

    using ThreadSliceDesc_M = decltype(
        make_naive_tensor_descriptor_packed(make_tuple(ThreadSliceDesc_M_K{}.GetLength(I0))));

    using ThreadwiseMaxReduce = ThreadwiseReduction<AccDataType,
                                                    ThreadSliceDesc_M_K,
                                                    ThreadSliceDesc_M,
                                                    reduce::Max,
                                                    false>;

    using ThreadClusterLengths_M_K = decltype(ThreadClusterDesc_M_K{}.GetLengths());

    using BlockwiseMaxReduce = PartitionedBlockwiseReduction_v2<AccDataType,
                                                                BlockSize,
                                                                ThreadClusterLengths_M_K,
                                                                ThreadMap_M_K,
                                                                reduce::Max,
                                                                false>;

    using BlockwiseSumReduce = PartitionedBlockwiseReduction_v2<AccDataType,
                                                                BlockSize,
                                                                ThreadClusterLengths_M_K,
                                                                ThreadMap_M_K,
                                                                reduce::Add,
                                                                false>;

    using ThreadwiseSumReduce = ThreadwiseReduction<AccDataType,
                                                    ThreadSliceDesc_M_K,
                                                    ThreadSliceDesc_M,
                                                    reduce::Add,
                                                    false>;

    using BufferType = StaticBuffer<AddressSpaceEnum::Vgpr, AccDataType, MRepeat, true>;

    template <typename CThreadBuffer, typename WorkspaceBuffer>
    __host__ __device__ void Run(CThreadBuffer& in_thread_buf, WorkspaceBuffer& reduce_work_buf)
    {
        // find max value
        static_for<0, MRepeat, 1>{}([&](auto I) {
            max_value_buf(I) = reduce::Max::template GetIdentityValue<AccDataType>();
        });
        ThreadwiseMaxReduce::Reduce(in_thread_buf, max_value_buf);
        static_for<0, MRepeat, 1>{}([&](auto I) {
            BlockwiseMaxReduce::Reduce(reduce_work_buf, max_value_buf(I));
            block_sync_lds();
        });

        // calculate exp for elements, P=exp(s-max)
        static_for<0, MRepeat, 1>{}([&](auto iM) {
            static_for<0, KRepeat, 1>{}([&](auto iK) {
                auto offset = Number<ThreadSliceDesc_M_K{}.CalculateOffset(make_tuple(iM, iK))>{};
                in_thread_buf(offset) = math::exp(in_thread_buf[offset] - max_value_buf(iM));
            });
        });

        // sum data
        static_for<0, MRepeat, 1>{}([&](auto I) {
            sum_value_buf(I) = reduce::Add::template GetIdentityValue<AccDataType>();
        });
        ThreadwiseSumReduce::Reduce(in_thread_buf, sum_value_buf);
        static_for<0, MRepeat, 1>{}([&](auto I) {
            BlockwiseSumReduce::Reduce(reduce_work_buf, sum_value_buf(I));
            block_sync_lds();
        });
    }

    BufferType max_value_buf;
    BufferType sum_value_buf;
};

} // namespace ck
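`BlockwiseSoftmax::Run` above is the standard numerically stable softmax pipeline per M row: a max reduction, an in-place `P = exp(s - max)`, then a sum reduction; the division by the row sum is left to the caller, which reads the sums from `sum_value_buf`. A single-threaded sketch of the same math, shown only as an illustration and with the final division included for completeness, could be:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Row-wise numerically stable softmax: subtract the row max before exponentiating,
// mirroring the max-reduce / exp(s - max) / sum-reduce steps in BlockwiseSoftmax::Run.
// Here one thread owns the whole row; the block version splits the row across the
// thread cluster and uses LDS for the two reductions.
std::vector<float> softmax_row(const std::vector<float>& s)
{
    const float max_val = *std::max_element(s.begin(), s.end());

    std::vector<float> p(s.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < s.size(); ++i)
    {
        p[i] = std::exp(s[i] - max_val);
        sum += p[i];
    }
    for(float& x : p) // the block routine stops before this division
        x /= sum;
    return p;
}

int main()
{
    for(float x : softmax_row({1.f, 2.f, 3.f, 4.f}))
        std::printf("%f ", x);
    std::printf("\n");
    return 0;
}
```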
include/ck/tensor_operation/gpu/block/blockwise_welford.hpp
0 → 100644
View file @ 1dbdab56
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/tensor_description/cluster_descriptor.hpp"
#include "ck/utility/reduction_common.hpp"

namespace ck {

// clang-format off
// Assume:
//  1) work_buffer is buffer (typically LDS) allocated outside as workspace
//  2) work_buffer has T elements, and space size is no less than 3*BlockSize
//  3) mean_value, var_value and count is the input data in vgpr from each thread
//  4) mean_value, var_value and count is the over-written reduced output in vgpr for each thread
//  5) Merge mean and M from ThreadwiseWelford
// clang-format on
template <typename T,
          index_t BlockSize,
          typename ThreadClusterLengths_M_K,
          typename ThreadClusterArrangeOrder,
          bool GetActualVariance = true>
struct BlockwiseWelford
{
    static_assert(BlockSize == ThreadClusterLengths_M_K::At(0) * ThreadClusterLengths_M_K::At(1),
                  "The product of cluster lengths should be same as BlockSize!");

    static constexpr auto BufferLength_M = ThreadClusterLengths_M_K::At(0);
    static constexpr auto BufferLength_K = ThreadClusterLengths_M_K::At(1);

    static constexpr auto block_buf_desc_m_k = make_naive_tensor_descriptor_packed(
        make_tuple(Number<BufferLength_M>{}, Number<BufferLength_K>{}));

    static constexpr auto thread_cluster_desc =
        make_cluster_descriptor(ThreadClusterLengths_M_K{}, ThreadClusterArrangeOrder{});

    __device__ static inline void
    Merge(T& mean_a, T& var_a, int& count_a, T mean_b, T var_b, int count_b)
    {
        int count            = count_a + count_b;
        T count_b_over_count = count == 0 ? type_convert<T>(0) : type_convert<T>(count_b) / count;
        T delta              = mean_b - mean_a;
        mean_a += delta * count_b_over_count;
        var_a += var_b + delta * delta * count_a * count_b_over_count;
        count_a = count;
    }

    __device__ static void Run(T& mean_value, T& var_value, int& count)
    {
        __shared__ T mean_block_buf[BlockSize];
        __shared__ T var_block_buf[BlockSize];
        __shared__ int count_block_buf[BlockSize];

        constexpr auto cluster_len_shift = get_shift<BufferLength_K>();

        const auto thread_cluster_idx =
            thread_cluster_desc.CalculateBottomIndex(make_multi_index(get_thread_local_1d_id()));

        const auto thread_m_cluster_id = thread_cluster_idx[Number<0>{}];
        const auto thread_k_cluster_id = thread_cluster_idx[Number<1>{}];

        index_t offset1 = block_buf_desc_m_k.CalculateOffset(thread_cluster_idx);

        mean_block_buf[offset1]  = mean_value;
        var_block_buf[offset1]   = var_value;
        count_block_buf[offset1] = count;

        block_sync_lds();

        static_for<0, cluster_len_shift, 1>{}([&](auto I) {
            constexpr index_t indOffset = 1 << (cluster_len_shift - 1 - I());

            if(thread_k_cluster_id < indOffset)
            {
                index_t offset2 = block_buf_desc_m_k.CalculateOffset(thread_cluster_idx +
                                                                     make_tuple(0, indOffset));

                T mean1    = mean_block_buf[offset1];
                T var1     = var_block_buf[offset1];
                int count1 = count_block_buf[offset1];

                T mean2    = mean_block_buf[offset2];
                T var2     = var_block_buf[offset2];
                int count2 = count_block_buf[offset2];

                Merge(mean1, var1, count1, mean2, var2, count2);

                mean_block_buf[offset1]  = mean1;
                var_block_buf[offset1]   = var1;
                count_block_buf[offset1] = count1;
            }

            block_sync_lds();
        });

        index_t offset = block_buf_desc_m_k.CalculateOffset(make_tuple(thread_m_cluster_id, 0));

        count      = count_block_buf[offset];
        mean_value = mean_block_buf[offset];

        if constexpr(GetActualVariance)
            var_value = var_block_buf[offset] / count;
        else
            var_value = var_block_buf[offset];
    };
};

} // namespace ck
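`BlockwiseWelford::Merge` above is the pairwise combination step of Welford's algorithm: given two partial results `(mean, M2, count)`, where `M2` is the sum of squared deviations (the `var_*` values before the final division), it produces the statistics of the union without revisiting the data. A small host-side sketch, with illustrative values only, that checks the merge against a direct two-half accumulation might be:

```cpp
#include <cstdio>
#include <vector>

// Pairwise merge of two Welford partials (mean, M2 = sum of squared deviations, count),
// the same update BlockwiseWelford::Merge applies across the K thread cluster.
void welford_merge(double& mean_a, double& m2_a, int& count_a,
                   double mean_b, double m2_b, int count_b)
{
    const int count    = count_a + count_b;
    const double frac  = count == 0 ? 0.0 : static_cast<double>(count_b) / count;
    const double delta = mean_b - mean_a;

    mean_a += delta * frac;
    m2_a += m2_b + delta * delta * count_a * frac;
    count_a = count;
}

int main()
{
    const std::vector<double> x = {1, 2, 3, 4, 5, 6, 7, 8};

    // Accumulate each half independently with the textbook single-pass Welford update...
    auto accumulate = [](const double* p, int n, double& mean, double& m2) {
        mean = 0.0;
        m2   = 0.0;
        for(int i = 0; i < n; ++i)
        {
            const double delta = p[i] - mean;
            mean += delta / (i + 1);
            m2 += delta * (p[i] - mean);
        }
    };

    double mean_a, m2_a, mean_b, m2_b;
    accumulate(x.data(), 4, mean_a, m2_a);
    accumulate(x.data() + 4, 4, mean_b, m2_b);

    // ...then merge; variance = M2 / count, matching GetActualVariance == true.
    int count_a = 4;
    welford_merge(mean_a, m2_a, count_a, mean_b, m2_b, 4);
    std::printf("mean = %f, variance = %f\n", mean_a, m2_a / count_a); // 4.5, 5.25
    return 0;
}
```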
include/ck/tensor_operation/gpu/block/reduction_functions_blockwise.hpp
View file @ 1dbdab56
...
...
@@ -82,6 +82,78 @@ struct PartitionedBlockwiseReduction
};
};
// clang-format off
// Assume:
//  1) work_buffer is buffer (typically LDS) allocated outside as workspace, does not include any in/out data
//  2) work_buffer has AccDataType elements, and space size is no less than BlockSize
//  3) in_out_value is the input data in vgpr from each thread
//  4) in_out_value is the over-written reduced output in vgpr for each thread
// clang-format on
template <typename AccDataType,
          index_t BlockSize,
          typename ThreadClusterLengths_M_K,
          typename ThreadClusterDesc,
          typename OpReduce,
          bool PropagateNan,
          typename Accumulation =
              detail::AccumulateWithNanCheck<PropagateNan, OpReduce, AccDataType>>
struct PartitionedBlockwiseReduction_v2
{
    static_assert(BlockSize == ThreadClusterLengths_M_K::At(0) * ThreadClusterLengths_M_K::At(1),
                  "The product of cluster lengths should be same as BlockSize!");

    static constexpr auto BufferLength_M = ThreadClusterLengths_M_K::At(0);
    static constexpr auto BufferLength_K = ThreadClusterLengths_M_K::At(1);

    static_assert(BufferLength_K > 1, "Parallel reduction need work on at least two elements");

    static constexpr auto block_buf_desc_m_k = make_naive_tensor_descriptor_packed(
        make_tuple(Number<BufferLength_M>{}, Number<BufferLength_K>{}));

    static constexpr auto thread_cluster_desc = ThreadClusterDesc{};

    template <typename BufferType>
    __device__ static void Reduce(BufferType& work_buffer, AccDataType& in_out_value)
    {
        static_assert(is_same<typename BufferType::type, AccDataType>{},
                      "Buffer data type should be consistent as AccDataType!");

        constexpr auto cluster_len_shift = get_shift<BufferLength_K>();

        const auto thread_cluster_idx =
            thread_cluster_desc.CalculateBottomIndex(make_multi_index(get_thread_local_1d_id()));

        const auto thread_m_cluster_id = thread_cluster_idx[Number<0>{}];
        const auto thread_k_cluster_id = thread_cluster_idx[Number<1>{}];

        work_buffer(block_buf_desc_m_k.CalculateOffset(thread_cluster_idx)) = in_out_value;

        __syncthreads();

        static_for<0, cluster_len_shift, 1>{}([&](auto I) {
            constexpr index_t indOffset = 1 << (cluster_len_shift - 1 - I());

            if(thread_k_cluster_id < indOffset)
            {
                index_t offset1 = block_buf_desc_m_k.CalculateOffset(thread_cluster_idx);
                index_t offset2 = block_buf_desc_m_k.CalculateOffset(thread_cluster_idx +
                                                                     make_tuple(0, indOffset));

                AccDataType opData1 = work_buffer[offset1];
                AccDataType opData2 = work_buffer[offset2];
                Accumulation::Calculate(opData1, opData2);
                work_buffer(offset1) = opData1;
            }

            __syncthreads();
        });

        index_t offset = block_buf_desc_m_k.CalculateOffset(make_tuple(thread_m_cluster_id, 0));

        in_out_value = work_buffer[offset];
    };
};
// clang-format off
// Assume:
// 1) work_val_buffer/work_idx_buffer is buffer (typically LDS) allocated outside as workspace, does not include any in/out data
...
...
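The newly added `PartitionedBlockwiseReduction_v2::Reduce` is a classic strided tree reduction: it runs `get_shift<BufferLength_K>()` rounds, each halving the number of active lanes along K, with lane `i` combining its LDS value with lane `i + stride`. A single-threaded sketch of that schedule, using addition as the reduce op and assuming K is a power of two, is:

```cpp
#include <cstdio>
#include <vector>

// Strided tree reduction over K elements (K assumed a power of two), the same
// schedule Reduce() runs across the K threads of one M row; LDS plays the role
// of this buffer and Accumulation::Calculate plays the role of '+='.
float tree_reduce_add(std::vector<float> buf)
{
    const int k = static_cast<int>(buf.size());
    for(int stride = k / 2; stride > 0; stride /= 2) // log2(K) rounds, like get_shift<K>()
        for(int i = 0; i < stride; ++i)              // only lanes i < stride stay active
            buf[i] += buf[i + stride];
    return buf[0];
}

int main()
{
    std::printf("%f\n", tree_reduce_add({1, 2, 3, 4, 5, 6, 7, 8})); // prints 36
    return 0;
}
```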
include/ck/tensor_operation/gpu/device/device_5ary_elementwise.hpp
deleted 100644 → 0
View file @ d2e49b23
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iostream>
#include <sstream>
#include <vector>

#include "ck/utility/common_header.hpp"
#include "ck/tensor_description/tensor_descriptor.hpp"
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
#include "ck/tensor_operation/gpu/device/device_elementwise.hpp"
#include "ck/tensor_operation/gpu/grid/gridwise_5ary_Elementwise_1d.hpp"
#include "ck/host_utility/device_prop.hpp"
#include "ck/host_utility/kernel_launch.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename DDataType,
          typename EDataType,
          typename FDataType,
          typename ComputeDataType,
          typename ElementwiseFunctor,
          index_t NDim,
          index_t MPerThread,
          index_t AScalarPerVector,
          index_t BScalarPerVector,
          index_t CScalarPerVector,
          index_t DScalarPerVector,
          index_t EScalarPerVector,
          index_t FScalarPerVector>
struct Device5AryElementwise : public DeviceElementwise<5, 1, NDim, ElementwiseFunctor>
{
    static constexpr auto I0 = Number<0>{};

    template <typename Desc_M>
    static auto PadDescriptor_M_1d(Desc_M desc_m, index_t gridSize, index_t blockSize)
    {
        const auto m            = desc_m.GetLength(I0);
        const index_t loop_step = gridSize * blockSize * MPerThread;
        const auto pad          = math::integer_least_multiple(m, loop_step) - m;
        const auto desc_m_pad =
            transform_tensor_descriptor(desc_m,
                                        make_tuple(make_right_pad_transform(m, pad)),
                                        make_tuple(Sequence<0>{}),
                                        make_tuple(Sequence<0>{}));
        return desc_m_pad;
    }

    static auto MakeDescriptor_M(const std::vector<index_t>& lengths,
                                 const std::vector<index_t>& stride,
                                 index_t gridSize,
                                 index_t blockSize)
    {
        auto tupleOfShape  = generate_tuple([&](auto I) { return lengths[I]; }, Number<NDim>{});
        auto tupleOfStride = generate_tuple([&](auto I) { return stride[I]; }, Number<NDim>{});

        // nd desc - [s0, s1, s2, ...]
        const auto desc = make_naive_tensor_descriptor(tupleOfShape, tupleOfStride);

        // merge nd to 1d desc - [s0 * s1 * ...]
        if constexpr(NDim > 1)
        {
            const auto desc_m = transform_tensor_descriptor(
                desc,
                make_tuple(make_merge_transform(tupleOfShape)),
                make_tuple(generate_sequence_v2([&](auto I) { return I; }, Number<NDim>{})),
                make_tuple(Sequence<0>{}));

            return PadDescriptor_M_1d(desc_m, gridSize, blockSize);
        }
        else
            return PadDescriptor_M_1d(desc, gridSize, blockSize);
    }

    using AGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));
    using BGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));
    using CGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));
    using DGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));
    using EGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));
    using FGridDesc_M = decltype(MakeDescriptor_M({1, 1}, {1, 1}, 1, 1));

    using Gridwise5AryEltwise = Gridwise5AryElementwise_1D<ADataType,
                                                           BDataType,
                                                           CDataType,
                                                           DDataType,
                                                           EDataType,
                                                           FDataType,
                                                           ComputeDataType,
                                                           AGridDesc_M,
                                                           BGridDesc_M,
                                                           CGridDesc_M,
                                                           DGridDesc_M,
                                                           EGridDesc_M,
                                                           FGridDesc_M,
                                                           ElementwiseFunctor,
                                                           MPerThread,
                                                           AScalarPerVector,
                                                           BScalarPerVector,
                                                           CScalarPerVector,
                                                           DScalarPerVector,
                                                           EScalarPerVector,
                                                           FScalarPerVector>;

    struct Argument : public BaseArgument
    {
        Argument(const ADataType* p_a,
                 const BDataType* p_b,
                 const CDataType* p_c,
                 const DDataType* p_d,
                 const EDataType* p_e,
                 FDataType* p_f,
                 const std::vector<index_t>& lengths,
                 const std::vector<index_t>& a_strides,
                 const std::vector<index_t>& b_strides,
                 const std::vector<index_t>& c_strides,
                 const std::vector<index_t>& d_strides,
                 const std::vector<index_t>& e_strides,
                 const std::vector<index_t>& f_strides,
                 ElementwiseFunctor functor)
            : p_a_(p_a),
              p_b_(p_b),
              p_c_(p_c),
              p_d_(p_d),
              p_e_(p_e),
              p_f_(p_f),
              lengths_(lengths),
              a_strides_(a_strides),
              b_strides_(b_strides),
              c_strides_(c_strides),
              d_strides_(d_strides),
              e_strides_(e_strides),
              f_strides_(f_strides),
              functor_(functor),
              blockSize_(256),
              gridSize_(120) // FIXME - Calculate the grid size by number of CU in the future
        {
            a_grid_desc_m_ = MakeDescriptor_M(lengths, a_strides, gridSize_, blockSize_);
            b_grid_desc_m_ = MakeDescriptor_M(lengths, b_strides, gridSize_, blockSize_);
            c_grid_desc_m_ = MakeDescriptor_M(lengths, c_strides, gridSize_, blockSize_);
            d_grid_desc_m_ = MakeDescriptor_M(lengths, d_strides, gridSize_, blockSize_);
            e_grid_desc_m_ = MakeDescriptor_M(lengths, e_strides, gridSize_, blockSize_);
            f_grid_desc_m_ = MakeDescriptor_M(lengths, f_strides, gridSize_, blockSize_);
        }

        const ADataType* p_a_;
        const BDataType* p_b_;
        const CDataType* p_c_;
        const DDataType* p_d_;
        const EDataType* p_e_;
        FDataType* p_f_;
        std::vector<index_t> lengths_;
        AGridDesc_M a_grid_desc_m_;
        BGridDesc_M b_grid_desc_m_;
        CGridDesc_M c_grid_desc_m_;
        DGridDesc_M d_grid_desc_m_;
        EGridDesc_M e_grid_desc_m_;
        FGridDesc_M f_grid_desc_m_;
        std::vector<index_t> a_strides_;
        std::vector<index_t> b_strides_;
        std::vector<index_t> c_strides_;
        std::vector<index_t> d_strides_;
        std::vector<index_t> e_strides_;
        std::vector<index_t> f_strides_;
        ElementwiseFunctor functor_;
        index_t blockSize_;
        index_t gridSize_;
    };

    struct Invoker : public BaseInvoker
    {
        float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{})
        {
            const auto kernel = kernel_5ary_elementwise_1d<Gridwise5AryEltwise,
                                                           ADataType,
                                                           BDataType,
                                                           CDataType,
                                                           DDataType,
                                                           EDataType,
                                                           FDataType,
                                                           AGridDesc_M,
                                                           BGridDesc_M,
                                                           CGridDesc_M,
                                                           DGridDesc_M,
                                                           EGridDesc_M,
                                                           FGridDesc_M,
                                                           ElementwiseFunctor>;

            float elapsed_time = launch_and_time_kernel(stream_config,
                                                        kernel,
                                                        dim3(arg.gridSize_),
                                                        dim3(arg.blockSize_),
                                                        0,
                                                        arg.p_a_,
                                                        arg.p_b_,
                                                        arg.p_c_,
                                                        arg.p_d_,
                                                        arg.p_e_,
                                                        arg.p_f_,
                                                        arg.a_grid_desc_m_,
                                                        arg.b_grid_desc_m_,
                                                        arg.c_grid_desc_m_,
                                                        arg.d_grid_desc_m_,
                                                        arg.e_grid_desc_m_,
                                                        arg.f_grid_desc_m_,
                                                        arg.functor_);
            return elapsed_time;
        }

        // polymorphic
        float Run(const BaseArgument* p_arg,
                  const StreamConfig& stream_config = StreamConfig{}) override
        {
            return Run(*dynamic_cast<const Argument*>(p_arg), stream_config);
        }
    };

    bool IsSupportedArgument(const BaseArgument& p_arg) { return IsSupportedArgument(&p_arg); }

    bool IsSupportedArgument(const BaseArgument* p_arg) override
    {
        const Argument* pArg = dynamic_cast<const Argument*>(p_arg);

        if(pArg == nullptr)
            return false;

        if(pArg->lengths_.size() != NDim)
            return false;

        if(pArg->lengths_.back() % MPerThread != 0)
            return false;

        auto IsScalarPerVectorValid = [](bool isLastDimensionCoalesced, int scalarPerVector) {
            bool ret = true;

            if(!isLastDimensionCoalesced)
                ret = scalarPerVector == 1;
            else
                ret = MPerThread % scalarPerVector == 0;

            return ret;
        };

        if(!IsScalarPerVectorValid(pArg->a_strides_.back() == 1, AScalarPerVector))
            return false;

        if(!IsScalarPerVectorValid(pArg->b_strides_.back() == 1, BScalarPerVector))
            return false;

        if(!IsScalarPerVectorValid(pArg->c_strides_.back() == 1, CScalarPerVector))
            return false;

        if(!IsScalarPerVectorValid(pArg->d_strides_.back() == 1, DScalarPerVector))
            return false;

        if(!IsScalarPerVectorValid(pArg->e_strides_.back() == 1, EScalarPerVector))
            return false;

        if(!IsScalarPerVectorValid(pArg->f_strides_.back() == 1, FScalarPerVector))
            return false;

        return true;
    };

    static auto MakeArgument(std::array<const void*, 5> p_inputs,
                             std::array<void*, 1> p_outputs,
                             std::vector<index_t> lengths,
                             std::vector<index_t> a_strides,
                             std::vector<index_t> b_strides,
                             std::vector<index_t> c_strides,
                             std::vector<index_t> d_strides,
                             std::vector<index_t> e_strides,
                             std::vector<index_t> f_strides,
                             ElementwiseFunctor functor)
    {
        return Argument{static_cast<const ADataType*>(p_inputs[0]),
                        static_cast<const BDataType*>(p_inputs[1]),
                        static_cast<const CDataType*>(p_inputs[2]),
                        static_cast<const DDataType*>(p_inputs[3]),
                        static_cast<const EDataType*>(p_inputs[4]),
                        static_cast<FDataType*>(p_outputs[0]),
                        lengths,
                        a_strides,
                        b_strides,
                        c_strides,
                        d_strides,
                        e_strides,
                        f_strides,
                        functor};
    }

    std::unique_ptr<BaseArgument>
    MakeArgumentPointer(std::array<const void*, 5> p_inputs,
                        std::array<void*, 1> p_outputs,
                        std::vector<index_t> lengths,
                        std::vector<std::vector<index_t>> input_strides,
                        std::vector<std::vector<index_t>> output_strides,
                        ElementwiseFunctor functor) override
    {
        return std::make_unique<Argument>(static_cast<const ADataType*>(p_inputs[0]),
                                          static_cast<const BDataType*>(p_inputs[1]),
                                          static_cast<const CDataType*>(p_inputs[2]),
                                          static_cast<const DDataType*>(p_inputs[3]),
                                          static_cast<const EDataType*>(p_inputs[4]),
                                          static_cast<FDataType*>(p_outputs[0]),
                                          lengths,
                                          input_strides[0],
                                          input_strides[1],
                                          input_strides[2],
                                          input_strides[3],
                                          input_strides[4],
                                          output_strides[0],
                                          functor);
    }

    static auto MakeInvoker() { return Invoker{}; }

    std::unique_ptr<BaseInvoker> MakeInvokerPointer() override
    {
        return std::make_unique<Invoker>();
    }

    // polymorphic
    std::string GetTypeString() const override
    {
        auto str = std::stringstream();

        // clang-format off
        str << "Device5aryElementwise"
            << "<"
            << "NDim = " << NDim
            << "MPerThread = " << MPerThread
            << "AScalarPerVector = " << AScalarPerVector
            << "BScalarPerVector = " << BScalarPerVector
            << "CScalarPerVector = " << CScalarPerVector
            << "DScalarPerVector = " << DScalarPerVector
            << "EScalarPerVector = " << EScalarPerVector
            << "FScalarPerVector = " << FScalarPerVector
            << ">";
        // clang-format on

        return str.str();
    }
};

} // namespace device
} // namespace tensor_operation
} // namespace ck