"vscode:/vscode.git/clone" did not exist on "dacc7afeb21e9469bfb4e5f5750ecd861221fd29"
csr_sort.cu 4.88 KB
Newer Older
/**
 *  Copyright (c) 2020 by Contributors
 * @file array/cuda/csr_sort.cu
 * @brief Sort CSR index
 */
#include <dgl/array.h>

#include <cub/cub.cuh>

#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

/**
 * @brief Check whether the column indices within each row are sorted.
 */
template <typename IdType>
__global__ void _SegmentIsSorted(
    const IdType* indptr, const IdType* indices, int64_t num_rows,
    int8_t* flags) {
  // Grid-stride loop: each thread checks one or more rows.
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < num_rows) {
    // Scan adjacent index pairs and stop early at the first inversion.
    bool f = true;
    for (IdType i = indptr[tx] + 1; f && i < indptr[tx + 1]; ++i) {
      f = (indices[i - 1] <= indices[i]);
    }
    flags[tx] = static_cast<int8_t>(f);
    tx += stride_x;
  }
}
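
// The per-row flags written by _SegmentIsSorted are AND-reduced with
// cuda::AllTrue in CSRIsSorted below.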

template <DGLDeviceType XPU, typename IdType>
bool CSRIsSorted(CSRMatrix csr) {
  const auto& ctx = csr.indptr->ctx;
  cudaStream_t stream = runtime::getCurrentCUDAStream();
  auto device = runtime::DeviceAPI::Get(ctx);
  // We allocate a workspace of num_rows bytes (one flag per row). It wastes
  // a bit of memory but should be fine.
  int8_t* flags =
      static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows));
  const int nt = cuda::FindNumThreads(csr.num_rows);
  const int nb = (csr.num_rows + nt - 1) / nt;
  CUDA_KERNEL_CALL(
      _SegmentIsSorted, nb, nt, 0, stream, csr.indptr.Ptr<IdType>(),
      csr.indices.Ptr<IdType>(), csr.num_rows, flags);
  bool ret = cuda::AllTrue(flags, csr.num_rows, ctx);
  device->FreeWorkspace(ctx, flags);
  return ret;
}

template bool CSRIsSorted<kDGLCUDA, int32_t>(CSRMatrix csr);
template bool CSRIsSorted<kDGLCUDA, int64_t>(CSRMatrix csr);

template <DGLDeviceType XPU, typename IdType>
void CSRSort_(CSRMatrix* csr) {
  // Only the int32/int64 specializations below are implemented for CUDA.
  LOG(FATAL) << "Unreachable code.";
}

template <>
void CSRSort_<kDGLCUDA, int32_t>(CSRMatrix* csr) {
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(csr->indptr->ctx);
  cudaStream_t stream = runtime::getCurrentCUDAStream();
  // allocate cusparse handle if needed
  if (!thr_entry->cusparse_handle) {
    CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
  }
  CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, stream));

  NDArray indptr = csr->indptr;
  NDArray indices = csr->indices;
  const auto& ctx = indptr->ctx;
  const int64_t nnz = indices->shape[0];
  if (!aten::CSRHasData(*csr))
    csr->data = aten::Range(0, nnz, indices->dtype.bits, ctx);
  NDArray data = csr->data;

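  // cusparseXcsrsort follows the standard two-phase cuSPARSE pattern: first
  // query the required buffer size, then run the sort with a caller-provided
  // workspace. `data` acts as the permutation array that csrsort reorders
  // alongside the column indices.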
  size_t workspace_size = 0;
  CUSPARSE_CALL(cusparseXcsrsort_bufferSizeExt(
      thr_entry->cusparse_handle, csr->num_rows, csr->num_cols, nnz,
      indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(), &workspace_size));
  void* workspace = device->AllocWorkspace(ctx, workspace_size);

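  // A general matrix descriptor with zero-based indexing matches DGL's CSR
  // representation.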
  cusparseMatDescr_t descr;
  CUSPARSE_CALL(cusparseCreateMatDescr(&descr));
  CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
  CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
  CUSPARSE_CALL(cusparseXcsrsort(
      thr_entry->cusparse_handle, csr->num_rows, csr->num_cols, nnz, descr,
      indptr.Ptr<int32_t>(), indices.Ptr<int32_t>(), data.Ptr<int32_t>(),
      workspace));

  csr->sorted = true;

  // free resources
  CUSPARSE_CALL(cusparseDestroyMatDescr(descr));
  device->FreeWorkspace(ctx, workspace);
}

template <>
void CSRSort_<kDGLCUDA, int64_t>(CSRMatrix* csr) {
  cudaStream_t stream = runtime::getCurrentCUDAStream();
  auto device = runtime::DeviceAPI::Get(csr->indptr->ctx);

  const auto& ctx = csr->indptr->ctx;
  const int64_t nnz = csr->indices->shape[0];
  const auto nbits = csr->indptr->dtype.bits;
  if (!aten::CSRHasData(*csr)) csr->data = aten::Range(0, nnz, nbits, ctx);

  // cub::DeviceSegmentedRadixSort sorts out-of-place, so stage the sorted
  // indices and data in fresh arrays and swap them in afterwards.
  IdArray new_indices = csr->indices.Clone();
  IdArray new_data = csr->data.Clone();

  const int64_t* offsets = csr->indptr.Ptr<int64_t>();
  const int64_t* key_in = csr->indices.Ptr<int64_t>();
  int64_t* key_out = new_indices.Ptr<int64_t>();
  const int64_t* value_in = csr->data.Ptr<int64_t>();
  int64_t* value_out = new_data.Ptr<int64_t>();

  // Allocate workspace. The first SortPairs call, given a null workspace
  // pointer, only computes the number of temporary bytes CUB needs.
  size_t workspace_size = 0;
  CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairs(
      nullptr, workspace_size, key_in, key_out, value_in, value_out, nnz,
      csr->num_rows, offsets, offsets + 1, 0, sizeof(int64_t) * 8, stream));
  void* workspace = device->AllocWorkspace(ctx, workspace_size);

  // Compute: sort the (column index, data) pairs within each row segment.
  CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairs(
      workspace, workspace_size, key_in, key_out, value_in, value_out, nnz,
      csr->num_rows, offsets, offsets + 1, 0, sizeof(int64_t) * 8, stream));

  csr->sorted = true;
  csr->indices = new_indices;
  csr->data = new_data;

  // free resources
  device->FreeWorkspace(ctx, workspace);
}

template void CSRSort_<kDGLCUDA, int32_t>(CSRMatrix* csr);
template void CSRSort_<kDGLCUDA, int64_t>(CSRMatrix* csr);
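
// A minimal usage sketch (hypothetical caller code, not part of this file),
// assuming `csr` holds int32 indices on a CUDA device:
//
//   if (!CSRIsSorted<kDGLCUDA, int32_t>(csr)) {
//     CSRSort_<kDGLCUDA, int32_t>(&csr);
//   }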

}  // namespace impl
}  // namespace aten
}  // namespace dgl