/*!
 *  Copyright (c) 2020 by Contributors
 * \file array/cuda/csr2coo.cc
 * \brief CSR2COO
 */
#include <dgl/array.h>

#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOO(CSRMatrix csr) {
  // Generic fallback: only the explicit CUDA specializations below are
  // valid; hitting this instantiation is a programming error.
  LOG(FATAL) << "Unreachable codes";
  return COOMatrix();
}

template <>
COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr) {
  // Expand the row-offset array of a 32-bit CSR matrix into one explicit
  // row index per nonzero via cuSPARSE; the column-index and data arrays
  // of the input are reused unchanged in the resulting COO matrix.
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  cudaStream_t stream = runtime::getCurrentCUDAStream();

  // The cuSPARSE handle is thread-local and created lazily; bind it to the
  // current stream so the conversion is stream-ordered.
  if (!thr_entry->cusparse_handle) {
    CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
  }
  CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, stream));

  NDArray offsets = csr.indptr;
  NDArray cols = csr.indices;
  NDArray eids = csr.data;
  const int64_t nnz = cols->shape[0];
  const int32_t* offsets_ptr = static_cast<int32_t*>(offsets->data);

  // One output slot per nonzero; same context and bit width as the offsets.
  NDArray coo_rows = aten::NewIdArray(nnz, offsets->ctx, offsets->dtype.bits);
  int32_t* coo_rows_ptr = static_cast<int32_t*>(coo_rows->data);

  CUSPARSE_CALL(cusparseXcsr2coo(
      thr_entry->cusparse_handle, offsets_ptr, nnz, csr.num_rows,
      coo_rows_ptr, CUSPARSE_INDEX_BASE_ZERO));

  // csr2coo emits row ids in nondecreasing order, so row_sorted is true;
  // column sortedness carries over from the CSR input.
  return COOMatrix(
      csr.num_rows, csr.num_cols, coo_rows, cols, eids, true, csr.sorted);
}

/*!
 * \brief Repeat elements: each output slot receives the value owning the
 *        offset range that covers it.
 * \param val Values to repeat, one per source row.
 * \param pos Monotonic offsets; output slot t receives val[i] where
 *        pos[i] <= t < pos[i + 1].
 * \param out Output buffer with \a length slots.
 * \param n_row Number of entries in \a pos.
 * \param length Number of output slots.
 *
 * For example:
 * val = [3, 0, 1]
 * pos = [0, 1, 1]
 * then,
 * out = [3, 1, 1]
 */
template <typename DType, typename IdType>
__global__ void _RepeatKernel(
    const DType* val, const IdType* pos, DType* out, int64_t n_row,
    int64_t length) {
  const int stride_x = gridDim.x * blockDim.x;
  for (IdType tx = static_cast<IdType>(blockIdx.x) * blockDim.x + threadIdx.x;
       tx < length; tx += stride_x) {
    // The owner of slot tx is the last offset not exceeding tx.
    IdType i = dgl::cuda::_UpperBound(pos, n_row, tx) - 1;
    out[tx] = val[i];
  }
}

template <>
COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr) {
  // 64-bit variant: cusparseXcsr2coo only accepts 32-bit indices, so the
  // row expansion is done with the custom _RepeatKernel above instead.
  const auto& ctx = csr.indptr->ctx;
  cudaStream_t stream = runtime::getCurrentCUDAStream();

  const int64_t nnz = csr.indices->shape[0];
  const auto nbits = csr.indptr->dtype.bits;
  // Row id i is written (indptr[i+1] - indptr[i]) times by the kernel.
  IdArray rowids = Range(0, csr.num_rows, nbits, ctx);
  IdArray ret_row = NewIdArray(nnz, ctx, nbits);

  const int num_threads = 256;
  const int num_blocks = (nnz + num_threads - 1) / num_threads;  // ceil-div
  CUDA_KERNEL_CALL(
      _RepeatKernel, num_blocks, num_threads, 0, stream,
      rowids.Ptr<int64_t>(), csr.indptr.Ptr<int64_t>(),
      ret_row.Ptr<int64_t>(), csr.num_rows, nnz);

  return COOMatrix(
      csr.num_rows, csr.num_cols, ret_row, csr.indices, csr.data, true,
      csr.sorted);
}

template COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOO<kDGLCUDA, int64_t>(CSRMatrix csr);
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRToCOODataAsOrder(CSRMatrix csr) {
  // Generic fallback: only the explicit CUDA specializations below are
  // valid; hitting this instantiation is a programming error.
  LOG(FATAL) << "Unreachable codes";
  return COOMatrix();
}

template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr) {
  // Convert to COO, then reorder the (row, col) pairs so they follow the
  // ascending order of the data (edge id) field.
  COOMatrix coo = CSRToCOO<kDGLCUDA, int32_t>(csr);
  // A null data field already denotes the identity order; nothing to do.
  if (aten::IsNullArray(coo.data)) return coo;

  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(coo.row->ctx);
  cudaStream_t stream = runtime::getCurrentCUDAStream();

  // Lazily create the thread-local cuSPARSE handle and bind the stream.
  if (!thr_entry->cusparse_handle) {
    CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
  }
  CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, stream));

  NDArray row = coo.row, col = coo.col, data = coo.data;
  int32_t* row_ptr = static_cast<int32_t*>(row->data);
  int32_t* col_ptr = static_cast<int32_t*>(col->data);
  int32_t* data_ptr = static_cast<int32_t*>(data->data);
  const int64_t nnz = row->shape[0];

  // NOTE: `data` is passed in the sort-key (rows) slot and `row`/`col` ride
  // along in the payload slots, so coosortByRow sorts all three arrays in
  // place by ascending data value.
  size_t workspace_size = 0;
  CUSPARSE_CALL(cusparseXcoosort_bufferSizeExt(
      thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, nnz, data_ptr,
      row_ptr, &workspace_size));
  void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
  CUSPARSE_CALL(cusparseXcoosortByRow(
      thr_entry->cusparse_handle, coo.num_rows, coo.num_cols, nnz, data_ptr,
      row_ptr, col_ptr, workspace));
  device->FreeWorkspace(row->ctx, workspace);

  // The row and column field have already been reordered according
  // to data, thus the data field will be deprecated.
  coo.data = aten::NullArray();
  coo.row_sorted = false;
  coo.col_sorted = false;
  return coo;
}

template <>
COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr) {
  // Convert to COO, then reorder the (row, col) pairs so they follow the
  // ascending order of the data (edge id) field.
  COOMatrix coo = CSRToCOO<kDGLCUDA, int64_t>(csr);
  // A null data field already denotes the identity order; nothing to do.
  if (aten::IsNullArray(coo.data)) return coo;

  // Sort the edge ids; the returned index array is the permutation that
  // must be applied to row and col.
  const auto& sort_result = Sort(coo.data);
  coo.row = IndexSelect(coo.row, sort_result.second);
  coo.col = IndexSelect(coo.col, sort_result.second);

  // The row and column field have already been reordered according
  // to data, thus the data field will be deprecated.
  coo.data = aten::NullArray();
  coo.row_sorted = false;
  coo.col_sorted = false;
  return coo;
}

template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOODataAsOrder<kDGLCUDA, int64_t>(CSRMatrix csr);

}  // namespace impl
}  // namespace aten
}  // namespace dgl