/*!
 *  Copyright (c) 2020 by Contributors
 * \file array/cuda/array_cumsum.cu
 * \brief Array cumsum GPU implementation.
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"
#include "./dgl_cub.cuh"

namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {

/*!
 * \brief Inclusive cumulative sum of a 1-D id array on the GPU.
 *
 * \param array Input id array (int32/int64) on a GPU context.
 * \param prepend_zero If true, the result has length len+1 with a leading 0,
 *        i.e. ret[0] = 0 and ret[i+1] = sum(array[0..i]).
 * \return A new array holding the cumulative sums (same dtype/context).
 */
template <DLDeviceType XPU, typename IdType>
IdArray CumSum(IdArray array, bool prepend_zero) {
  const int64_t len = array.NumElements();
  // Empty input: either echo the empty array back, or produce the single
  // leading zero the caller asked for.
  if (len == 0)
    return prepend_zero ? aten::Full(0, 1, array->dtype.bits, array->ctx)
                        : array;

  auto device = runtime::DeviceAPI::Get(array->ctx);
  hipStream_t stream = runtime::getCurrentCUDAStream();
  const IdType* input = array.Ptr<IdType>();

  // With prepend_zero, Full() zero-initializes the whole output, so slot 0
  // keeps its 0 while the scan writes the remaining len slots at offset 1.
  IdArray ret = prepend_zero
      ? aten::Full(0, len + 1, array->dtype.bits, array->ctx)
      : aten::NewIdArray(len, array->ctx, array->dtype.bits);
  IdType* output = ret.Ptr<IdType>() + (prepend_zero ? 1 : 0);

  // Standard two-phase CUB pattern: a null temp pointer makes the first call
  // report the required temporary-storage size without doing any work.
  size_t temp_bytes = 0;
  CUDA_CALL(hipcub::DeviceScan::InclusiveSum(
      nullptr, temp_bytes, input, output, len, stream));
  void* temp_storage = device->AllocWorkspace(array->ctx, temp_bytes);

  // Second call performs the actual inclusive scan on `stream`.
  CUDA_CALL(hipcub::DeviceScan::InclusiveSum(
      temp_storage, temp_bytes, input, output, len, stream));

  device->FreeWorkspace(array->ctx, temp_storage);
  return ret;
}

// Explicit instantiations for the id types supported on GPU.
template IdArray CumSum<kDLGPU, int32_t>(IdArray, bool);
template IdArray CumSum<kDLGPU, int64_t>(IdArray, bool);

}  // namespace impl
}  // namespace aten
}  // namespace dgl