#ifndef __INFINIOP_REDUCE_CUDA_H__
#define __INFINIOP_REDUCE_CUDA_H__

/*
 * Device functions for reduction operations on CUDA.
 *
 * Note: Only the local result on thread 0 is guaranteed to be correct.
 *       A manual broadcast is needed for other threads.
 *
 * Important Note: This is a device-independent header file containing reduce kernels
 *                 for all cuda-supporting platforms. Include device-specific headers
 *                 (such as <cub/block/block_reduce.cuh> for nvidia) in your source file
 *                 and then include this file for proper usage.
 */
namespace op::common_cuda::reduce_op {

// Sum(x^2) on contiguous data of length count.
// Only thread 0 is guaranteed to hold the correct block-wide result.
template <unsigned int BLOCK_SIZE, typename Tdata, typename Tcompute>
__device__ __forceinline__ Tcompute sumSquared(const Tdata *data_ptr, size_t count) {
    // Strided pass: thread t accumulates elements t, t + BLOCK_SIZE, ...
    Tcompute partial = 0;
    for (size_t idx = threadIdx.x; idx < count; idx += BLOCK_SIZE) {
        const Tcompute v = Tcompute(data_ptr[idx]);
        partial += v * v;
    }

    // Combine the per-thread partials with a CUB block-level reduction.
    using BlockReduce = cub::BlockReduce<Tcompute, BLOCK_SIZE>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    return BlockReduce(temp_storage).Sum(partial);
}

// Sum(x) on contiguous data of length count.
// Only thread 0 is guaranteed to hold the correct block-wide result.
template <unsigned int BLOCK_SIZE, typename Tdata, typename Tcompute>
__device__ __forceinline__ Tcompute sum(const Tdata *data_ptr, size_t count) {
    // Strided pass: thread t accumulates elements t, t + BLOCK_SIZE, ...
    Tcompute partial = 0;
    for (size_t idx = threadIdx.x; idx < count; idx += BLOCK_SIZE) {
        partial += Tcompute(data_ptr[idx]);
    }

    // Combine the per-thread partials with a CUB block-level reduction.
    using BlockReduce = cub::BlockReduce<Tcompute, BLOCK_SIZE>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    return BlockReduce(temp_storage).Sum(partial);
}

// Max(x) on contiguous data of length count.
// Precondition: count >= 1 — every thread seeds its running maximum with
// data_ptr[0], so an empty input would read out of bounds.
// Only thread 0 is guaranteed to hold the correct block-wide result.
template <unsigned int BLOCK_SIZE, typename Tdata>
__device__ __forceinline__ Tdata max(const Tdata *data_ptr, size_t count) {
    // Seeding with data_ptr[0] keeps threads that own no elements
    // (threadIdx.x >= count) holding a valid data value.
    Tdata local_max = data_ptr[0];

    for (size_t i = threadIdx.x; i < count; i += BLOCK_SIZE) {
        local_max = cub::Max()(local_max, data_ptr[i]);
    }

    // Every thread contributes a valid value, so use the full-block
    // Reduce(input, op) overload — consistent with the Sum(...) calls in
    // sum/sumSquared — instead of the num_valid overload with the
    // redundant num_valid == BLOCK_SIZE argument.
    using BlockReduce = cub::BlockReduce<Tdata, BLOCK_SIZE>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    return BlockReduce(temp_storage).Reduce(local_max, cub::Max());
}

} // namespace op::common_cuda::reduce_op

#endif