/*!
 *  Copyright (c) 2020 by Contributors
 * \file array/cuda/segment_reduce.cu
 * \brief Segment reduce C APIs and definitions.
 */
#include <dgl/array.h>
#include <dgl/base_heterograph.h>
#include "./segment_reduce.cuh"
#include "./functor.cuh"
#include "./utils.h"

namespace dgl {

using namespace cuda;

namespace aten {

template <int XPU, typename IdType, int bits>
void SegmentReduce(const std::string& op,
                   NDArray feat,
                   NDArray offsets,
                   NDArray out,
                   NDArray arg) {
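  // Dispatch on the feature bit width and the reduction operator. For "min"
  // and "max", `arg` records which element was selected in each segment so
  // the backward pass (BackwardSegmentCmp) can route gradients back.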
  SWITCH_BITS(bits, DType, {
    if (op == "sum") {
      cuda::SegmentReduce<IdType, DType, cuda::reduce::Sum<IdType, DType>>(
          feat, offsets, out, arg);
    } else if (op == "max") {
      cuda::SegmentReduce<IdType, DType, cuda::reduce::Max<IdType, DType>>(
          feat, offsets, out, arg);
    } else if (op == "min") {
      cuda::SegmentReduce<IdType, DType, cuda::reduce::Min<IdType, DType>>(
          feat, offsets, out, arg);
    } else {
      LOG(FATAL) << "Not implemented";
    }
  });
}

template <int XPU, typename IdType, int bits>
void ScatterAdd(NDArray feat,
                NDArray idx,
                NDArray out) {
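  // Dispatch on the feature bit width; the CUDA kernel accumulates each row
  // of `feat` into `out` at the row position given by `idx`.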
  SWITCH_BITS(bits, DType, {
    cuda::ScatterAdd<IdType, DType>(feat, idx, out);
  });
}


template <int XPU, typename IdType, int bits>
void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
                const std::string& op,
                const std::vector<NDArray>& feat,
                const std::vector<NDArray>& idx,
                const std::vector<NDArray>& idx_etype,
                std::vector<NDArray>* out) {
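  // No CUDA implementation yet: the SWITCH_BITS expansion only raises a
  // fatal error pointing callers to the CPU version.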
  SWITCH_BITS(bits, DType, {
    LOG(FATAL) << "Not implemented. Please use CPU version.";
  });
}


template <int XPU, typename IdType, int bits>
void BackwardSegmentCmp(NDArray feat,
                        NDArray arg,
                        NDArray out) {
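  // Backward pass of segment min/max: scatter the incoming gradients in
  // `feat` to the positions recorded in `arg` during the forward reduction.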
  SWITCH_BITS(bits, DType, {
    cuda::BackwardSegmentCmp<IdType, DType>(feat, arg, out);
  });
}
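
// Explicit instantiations: each API above is compiled for int32/int64 index
// types and 16-, 32-, and 64-bit floating-point feature types.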

template void SegmentReduce<kDLGPU, int32_t, 16>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
template void SegmentReduce<kDLGPU, int64_t, 16>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
template void SegmentReduce<kDLGPU, int32_t, 32>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
template void SegmentReduce<kDLGPU, int64_t, 32>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
template void SegmentReduce<kDLGPU, int32_t, 64>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
template void SegmentReduce<kDLGPU, int64_t, 64>(
    const std::string& op,
    NDArray feat,
    NDArray offsets,
    NDArray out,
    NDArray arg);
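
// ScatterAdd: instantiated for the same index and feature types.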
template void ScatterAdd<kDLGPU, int32_t, 16>(
    NDArray feat,
    NDArray idx,
    NDArray out);
template void ScatterAdd<kDLGPU, int64_t, 16>(
    NDArray feat,
    NDArray idx,
    NDArray out);
template void ScatterAdd<kDLGPU, int32_t, 32>(
    NDArray feat,
    NDArray idx,
    NDArray out);
template void ScatterAdd<kDLGPU, int64_t, 32>(
    NDArray feat,
    NDArray idx,
    NDArray out);
template void ScatterAdd<kDLGPU, int32_t, 64>(
    NDArray feat,
    NDArray idx,
    NDArray out);
template void ScatterAdd<kDLGPU, int64_t, 64>(
    NDArray feat,
    NDArray idx,
    NDArray out);

template void UpdateGradMinMax_hetero<kDLGPU, int32_t, 16>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);
template void UpdateGradMinMax_hetero<kDLGPU, int64_t, 16>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);
template void UpdateGradMinMax_hetero<kDLGPU, int32_t, 32>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);
template void UpdateGradMinMax_hetero<kDLGPU, int64_t, 32>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);
template void UpdateGradMinMax_hetero<kDLGPU, int32_t, 64>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);
template void UpdateGradMinMax_hetero<kDLGPU, int64_t, 64>(
    const HeteroGraphPtr& g, const std::string& op,
    const std::vector<NDArray>& feat, const std::vector<NDArray>& idx,
    const std::vector<NDArray>& idx_etype, std::vector<NDArray>* out);

template void BackwardSegmentCmp<kDLGPU, int32_t, 16>(
    NDArray feat,
    NDArray arg,
    NDArray out);
template void BackwardSegmentCmp<kDLGPU, int64_t, 16>(
    NDArray feat,
    NDArray arg,
    NDArray out);
template void BackwardSegmentCmp<kDLGPU, int32_t, 32>(
    NDArray feat,
    NDArray arg,
    NDArray out);
template void BackwardSegmentCmp<kDLGPU, int64_t, 32>(
    NDArray feat,
    NDArray arg,
    NDArray out);
template void BackwardSegmentCmp<kDLGPU, int32_t, 64>(
    NDArray feat,
    NDArray arg,
    NDArray out);
template void BackwardSegmentCmp<kDLGPU, int64_t, 64>(
    NDArray feat,
    NDArray arg,
    NDArray out);

}  // namespace aten
}  // namespace dgl