/**
 *  Copyright (c) 2020 by Contributors
 * @file array/cuda/utils.h
 * @brief Utilities for CUDA kernels.
 */
#ifndef DGL_ARRAY_CUDA_UTILS_H_
#define DGL_ARRAY_CUDA_UTILS_H_

#include <dgl/runtime/c_runtime_api.h>
#include <dgl/runtime/device_api.h>
#include <dgl/runtime/ndarray.h>
#include <dmlc/logging.h>

#include "../../runtime/cuda/cuda_common.h"
#include "dgl_cub.cuh"

namespace dgl {
namespace cuda {

#define CUDA_MAX_NUM_BLOCKS_X 0x7FFFFFFF
#define CUDA_MAX_NUM_BLOCKS_Y 0xFFFF
#define CUDA_MAX_NUM_BLOCKS_Z 0xFFFF
// The max number of threads per block
#define CUDA_MAX_NUM_THREADS 256

/** @brief Calculate the number of threads needed given the dimension length.
 *
 * It finds the largest power of two that does not exceed min(dim, max_nthrs).
 */
inline int FindNumThreads(int dim, int max_nthrs = CUDA_MAX_NUM_THREADS) {
  CHECK_GE(dim, 0);
  if (dim == 0) return 1;
  int ret = max_nthrs;
  while (ret > dim) {
    ret = ret >> 1;
  }
  return ret;
}
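
// For illustration: with the default cap of 256, FindNumThreads(300) returns
// 256, while FindNumThreads(100) returns 64, the largest power of two not
// exceeding 100.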

/**
 * @brief Compute the number of bits needed to store values in [0, range).
 */
template <typename T>
int _NumberOfBits(const T& range) {
  if (range <= 1) {
    // ranges of 0 or 1 require no bits to store
    return 0;
  }

  int bits = 1;
  while (bits < static_cast<int>(sizeof(T) * 8) && (1ull << bits) < range) {
    ++bits;
  }

  if (bits < static_cast<int>(sizeof(T) * 8)) {
    CHECK_EQ((range - 1) >> bits, 0);
  }
  CHECK_NE((range - 1) >> (bits - 1), 0);

  return bits;
}
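
// For illustration: _NumberOfBits(2) == 1 and _NumberOfBits(5) == 3, since
// the values 0..4 need three bits to represent.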

/**
 * @brief Find a number of blocks that is no greater than nblks and the
 * maximum number of blocks allowed on the given axis ('x', 'y' or 'z').
 */
template <char axis>
inline int FindNumBlocks(int nblks, int max_nblks = -1) {
  int default_max_nblks = -1;
  switch (axis) {
    case 'x':
      default_max_nblks = CUDA_MAX_NUM_BLOCKS_X;
      break;
    case 'y':
      default_max_nblks = CUDA_MAX_NUM_BLOCKS_Y;
      break;
    case 'z':
      default_max_nblks = CUDA_MAX_NUM_BLOCKS_Z;
      break;
    default:
      LOG(FATAL) << "Axis " << axis << " not recognized";
      break;
  }
  if (max_nblks == -1) max_nblks = default_max_nblks;
  CHECK_NE(nblks, 0);
  if (nblks < max_nblks) return nblks;
  return max_nblks;
}
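
// A typical 1-D launch configuration built from the two helpers above
// (a sketch; `length`, `stream`, and `SomeKernel` are assumptions for
// illustration):
//
//   const int nt = FindNumThreads(length);
//   const int nb = FindNumBlocks<'x'>((length + nt - 1) / nt);
//   CUDA_KERNEL_CALL(SomeKernel, nb, nt, 0, stream, /* kernel args */);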

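/**
 * @brief Read-only load that goes through the read-only data cache via
 * __ldg() on architectures that support it (SM 3.5+), falling back to a
 * plain dereference otherwise.
 */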
template <typename T>
__device__ __forceinline__ T _ldg(T* addr) {
#if __CUDA_ARCH__ >= 350
  return __ldg(addr);
#else
  return *addr;
#endif
}

/**
 * @brief Return true if the given bool flag array is all true.
 *
 * The input bool array uses int8_t so that each flag is byte-addressable.
 *
 * @param flags The bool array.
 * @param length The length.
 * @param ctx Device context.
 * @return True if all the flags are true.
 */
bool AllTrue(int8_t* flags, int64_t length, const DGLContext& ctx);

/**
 * @brief CUDA kernel that fills the vector of size length starting at ptr
 *        with val.
 * @note Internal use only.
 */
template <typename DType>
__global__ void _FillKernel(DType* ptr, size_t length, DType val) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    ptr[tx] = val;
    tx += stride_x;
  }
}

/** @brief Fill the vector of size length starting at ptr with val. */
template <typename DType>
void _Fill(DType* ptr, size_t length, DType val) {
  cudaStream_t stream = runtime::getCurrentCUDAStream();
  int nt = FindNumThreads(length);
  // On the x-axis, there is no need to worry about the upper bound.
  int nb = (length + nt - 1) / nt;
  CUDA_KERNEL_CALL(cuda::_FillKernel, nb, nt, 0, stream, ptr, length, val);
}
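
// For example, zero-initializing a float buffer of 1024 elements on the
// current stream (a sketch; `dev_ptr` is assumed to be a device pointer):
//
//   _Fill<float>(dev_ptr, 1024, 0.f);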

/**
 * @brief Search the adjacency list linearly for each (row, col) pair and
 * write the data under the matched position in the indices array to the
 * output.
 *
 * If there is no match, the value in \c filler is written.
 * If there are multiple matches, only the first match is written.
 * If the given data array is null, write the matched position to the output.
 */
template <typename IdType, typename DType>
__global__ void _LinearSearchKernel(
    const IdType* indptr, const IdType* indices, const IdType* data,
    const IdType* row, const IdType* col, int64_t row_stride,
    int64_t col_stride, int64_t length, const DType* weights, DType filler,
    DType* out) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    int rpos = tx * row_stride, cpos = tx * col_stride;
    IdType v = -1;
    const IdType r = row[rpos], c = col[cpos];
    for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) {
      if (indices[i] == c) {
        v = data ? data[i] : i;
        break;
      }
    }
    if (v == -1) {
      out[tx] = filler;
    } else {
      // The casts here are to be able to handle DType being __half.
      // GCC treats int64_t as a distinct type from long long, so
      // without the explicit cast to long long, it errors out saying
      // that the implicit cast results in an ambiguous choice of
      // constructor for __half.
      // The using statement is to avoid a linter error about using
      // long or long long.
      using LongLong = long long;  // NOLINT
      out[tx] = weights ? weights[v] : DType(LongLong(v));
    }
    tx += stride_x;
  }
}
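
// A launch sketch for the kernel above with one thread per (row, col) query
// (`indptr`, `indices`, `data`, `rows`, `cols`, `weights`, and `out` are
// assumptions for illustration):
//
//   const int nt = FindNumThreads(length);
//   const int nb = FindNumBlocks<'x'>((length + nt - 1) / nt);
//   CUDA_KERNEL_CALL(_LinearSearchKernel, nb, nt, 0, stream,
//       indptr, indices, data, rows, cols,
//       /*row_stride=*/1, /*col_stride=*/1, length, weights, filler, out);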

#if BF16_ENABLED
/**
 * @brief Specialization for bf16, because conversion from long long to
 * bfloat16 doesn't exist before SM80.
 */
template <typename IdType>
__global__ void _LinearSearchKernel(
    const IdType* indptr, const IdType* indices, const IdType* data,
    const IdType* row, const IdType* col, int64_t row_stride,
    int64_t col_stride, int64_t length, const __nv_bfloat16* weights,
    __nv_bfloat16 filler, __nv_bfloat16* out) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    int rpos = tx * row_stride, cpos = tx * col_stride;
    IdType v = -1;
    const IdType r = row[rpos], c = col[cpos];
    for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) {
      if (indices[i] == c) {
        v = data ? data[i] : i;
        break;
      }
    }
    if (v == -1) {
      out[tx] = filler;
    } else {
      // If the result is saved in bf16, it should be fine to convert it to
      // float first.
      out[tx] = weights ? weights[v] : __nv_bfloat16(static_cast<float>(v));
    }
    tx += stride_x;
  }
}
#endif  // BF16_ENABLED

/**
 * @brief Copy a single scalar of type DType from device memory to the host.
 */
template <typename DType>
inline DType GetCUDAScalar(
    runtime::DeviceAPI* device_api, DGLContext ctx, const DType* cuda_ptr) {
  DType result;
  device_api->CopyDataFromTo(
      cuda_ptr, 0, &result, 0, sizeof(result), ctx, DGLContext{kDGLCPU, 0},
      DGLDataTypeTraits<DType>::dtype);
  return result;
}
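
// For example, reading back a single int64_t counter written by a kernel
// (a sketch; `d_count` is assumed to be a device pointer valid in `ctx`):
//
//   auto* device = runtime::DeviceAPI::Get(ctx);
//   int64_t count = GetCUDAScalar(device, ctx, d_count);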

/**
 * @brief Given a sorted array and a value, this function returns the index
 * of the first element that compares greater than the value.
 *
 * This function assumes 0-based indexing.
 * @param A: ascending sorted array
 * @param n: size of A
 * @param x: value to search in A
 * @return index i of the first element s.t. A[i] > x. If x >= A[n-1],
 * returns n; if x < A[0], returns 0.
 */
template <typename IdType>
__device__ IdType _UpperBound(const IdType* A, int64_t n, IdType x) {
  IdType l = 0, r = n, m = 0;
  while (l < r) {
    m = l + (r - l) / 2;
    if (x >= A[m]) {
      l = m + 1;
    } else {
      r = m;
    }
  }
  return l;
}
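
// For illustration: with A = {1, 3, 5} and n = 3, _UpperBound(A, 3, 3)
// returns 2 (the first element greater than 3 is A[2] == 5), and
// _UpperBound(A, 3, 7) returns 3 (== n).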

/**
 * @brief Given a sorted array and a value, this function returns the index
 * of the element that is equal to the value, or n if no such element exists.
 *
 * This function assumes 0-based indexing.
 * @param A: ascending sorted array
 * @param n: size of A
 * @param x: value to search in A
 * @return index i s.t. A[i] == x. If no such index exists, returns n.
 */
template <typename IdType>
__device__ IdType _BinarySearch(const IdType* A, int64_t n, IdType x) {
  IdType l = 0, r = n - 1, m = 0;
  while (l <= r) {
    m = l + (r - l) / 2;
    if (A[m] == x) {
      return m;
    }
    if (A[m] < x) {
      l = m + 1;
    } else {
      r = m - 1;
    }
  }
  return n;  // not found
}
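
// For illustration: with A = {1, 3, 5} and n = 3, _BinarySearch(A, 3, 3)
// returns 1, while _BinarySearch(A, 3, 4) returns 3 (== n, not found).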

/**
 * @brief Select the elements of input whose corresponding mask flag is true
 * and write them, compacted, to output; the number of selected elements is
 * written to rst.
 */
template <typename DType, typename BoolType>
void MaskSelect(
    runtime::DeviceAPI* device, const DGLContext& ctx, const DType* input,
    const BoolType* mask, DType* output, int64_t n, int64_t* rst,
    cudaStream_t stream) {
  size_t workspace_size = 0;
  // The first call with a null workspace pointer only computes the required
  // temporary storage size (the standard cub two-pass idiom).
  CUDA_CALL(cub::DeviceSelect::Flagged(
      nullptr, workspace_size, input, mask, output, rst, n, stream));
  void* workspace = device->AllocWorkspace(ctx, workspace_size);
  CUDA_CALL(cub::DeviceSelect::Flagged(
      workspace, workspace_size, input, mask, output, rst, n, stream));
  device->FreeWorkspace(ctx, workspace);
}
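
// Example usage (a sketch; all pointers are assumed to be device pointers
// and `d_num_selected` a device buffer receiving the output count):
//
//   MaskSelect(device, ctx, d_in, d_mask, d_out, n, d_num_selected, stream);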

/**
 * @brief Get the device pointer backing an NDArray, mapping host-pinned
 * memory to its device address when necessary.
 */
inline void* GetDevicePointer(runtime::NDArray array) {
  void* ptr = array->data;
  if (array.IsPinned()) {
    CUDA_CALL(cudaHostGetDevicePointer(&ptr, ptr, 0));
  }
  return ptr;
}

}  // namespace cuda
}  // namespace dgl

#endif  // DGL_ARRAY_CUDA_UTILS_H_