OpenDAS / ColossalAI · Commits

Commit f08fc17f
authored Mar 31, 2022 by DouJS
committed by binmakeswell on Apr 06, 2022
block_reduce.h fix format (#581)
parent d2dc6049
Showing 1 changed file with 20 additions and 21 deletions.
colossalai/kernel/cuda_native/csrc/kernels/include/block_reduce.h
@@ -13,23 +13,22 @@ const float REDUCE_FLOAT_INF_NEG = -100000000.f;
const float REDUCE_FLOAT_INF_POS = 100000000.f;
const unsigned int WARP_REDUCE_SIZE = 32;

template <typename T>
__forceinline__ __device__ T warpReduceSum(T val) {
  for (int mask = (WARP_REDUCE_SIZE >> 1); mask > 0; mask >>= 1)
    val += __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_REDUCE_SIZE);
  return val;
}

/* Calculate the sum of all elements in a block */
template <typename T>
__forceinline__ __device__ T blockReduceSum(T val) {
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f;
  int wid = threadIdx.x >> 5;

  val = warpReduceSum<T>(val);

  if (lane == 0) shared[wid] = val;
  __syncthreads();

  val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : (T)0.0f;
...
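For orientation, here is a hypothetical call site for blockReduceSum (not part of this commit): a grid-stride kernel that sums n floats. The names sum_kernel, input, output, and n are illustrative, and the sketch assumes the elided tail of blockReduceSum performs the usual second warp-level reduction so that thread 0 ends up holding the block total.

#include "block_reduce.h"

// Hypothetical usage sketch (not part of this commit).
__global__ void sum_kernel(const float *input, float *output, int n) {
  float val = 0.f;
  // Grid-stride loop: each thread accumulates a partial sum.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x)
    val += input[i];
  val = blockReduceSum<float>(val);              // block-wide sum
  if (threadIdx.x == 0) atomicAdd(output, val);  // combine across blocks
}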
@@ -57,10 +56,10 @@ __inline__ __device__ void warpReduce<ReduceType::kMax, 1>(float *pval) {
template <>
__inline__ __device__ void warpReduce<ReduceType::kMax, 2>(float *pval) {
  float val0_tmp, val1_tmp;
#define WarpReduceMaxOneStep(a, b)                                 \
  val0_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval), a, b);     \
  val1_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 1), a, b); \
  *(pval) = max(val0_tmp, *(pval));                                \
  *(pval + 1) = max(val1_tmp, *(pval + 1));

  WarpReduceMaxOneStep(16, 32);
...
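The trailing elision presumably repeats WarpReduceMaxOneStep with offsets 8, 4, 2, and 1. For clarity, here is a loop-form equivalent of the same 2-element max butterfly (a sketch for illustration only; the header deliberately unrolls it, and warpReduceMax2Loop is a hypothetical name):

// Sketch: loop form of the unrolled butterfly above.
// Assumes WARP_REDUCE_MASK as used elsewhere in this header.
__inline__ __device__ void warpReduceMax2Loop(float *pval) {
  for (int mask = 16; mask > 0; mask >>= 1) {
    // Exchange with the lane whose ID differs in bit `mask`; keep the max.
    float v0 = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval), mask, 32);
    float v1 = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 1), mask, 32);
    *(pval) = max(v0, *(pval));
    *(pval + 1) = max(v1, *(pval + 1));
  }
}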
@@ -89,10 +88,10 @@ __inline__ __device__ void warpReduce<ReduceType::kSum, 1>(float *pval) {
template <>
__inline__ __device__ void warpReduce<ReduceType::kSum, 2>(float *pval) {
  float val0_tmp, val1_tmp;
#define WarpReduceSumOneStep(a, b)                                 \
  val0_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 0), a, b); \
  val1_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 1), a, b); \
  *(pval + 0) += val0_tmp;                                         \
  *(pval + 1) += val1_tmp

  WarpReduceSumOneStep(16, 32);
...
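One detail the realigned continuation backslashes make easier to see: the macro body ends at *(pval + 1) += val1_tmp with no trailing semicolon, so each invocation such as WarpReduceSumOneStep(16, 32); supplies its own. The first step expands roughly to:

// Illustrative preprocessor expansion of WarpReduceSumOneStep(16, 32):
val0_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 0), 16, 32);
val1_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 1), 16, 32);
*(pval + 0) += val0_tmp;
*(pval + 1) += val1_tmp;  // the final ';' comes from the call site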
@@ -107,14 +106,14 @@ __inline__ __device__ void warpReduce<ReduceType::kSum, 2>(float *pval) {
template <>
__inline__ __device__ void warpReduce<ReduceType::kSum, 4>(float *pval) {
  float val0_tmp, val1_tmp, val2_tmp, val3_tmp;
#define WarpReduceSumOneStep(a, b)                                 \
  val0_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 0), a, b); \
  val1_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 1), a, b); \
  val2_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 2), a, b); \
  val3_tmp = __shfl_xor_sync(WARP_REDUCE_MASK, *(pval + 3), a, b); \
  *(pval + 0) += val0_tmp;                                         \
  *(pval + 1) += val1_tmp;                                         \
  *(pval + 2) += val2_tmp;                                         \
  *(pval + 3) += val3_tmp

  WarpReduceSumOneStep(16, 32);
...
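For context, a hypothetical call site for the 4-element specialization (illustrative only: warp_sum4_example and partial are assumed names, while warpReduce and ReduceType come from this header). Reducing four accumulators per call keeps the four __shfl_xor_sync operations of each step independent of one another, so they can overlap in flight:

// Hypothetical usage sketch (not part of this commit).
__device__ void warp_sum4_example(float partial[4]) {
  // Sum each of the four register-resident values across the warp.
  warpReduce<ReduceType::kSum, 4>(partial);
  // Every lane now holds the same four warp-wide sums in partial[0..3].
}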