// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 *  Copyright (c) 2020 by Contributors
 * @file array/cuda/coo_sort.cc
 * @brief Sort COO index
 */
#include <dgl/array.h>
#include "../../../include/dgl/array.h"

#include "../../c_api_common.h"
#include "../../runtime/cuda/cuda_common.h"
#include "utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

///////////////////////////// COOSort_ /////////////////////////////

/**
 * @brief Encode row and column IDs into a single scalar per edge.
 *
 * @tparam IdType The type to encode as.
 * @param row The row (src) IDs per edge.
 * @param col The column (dst) IDs per edge.
 * @param nnz The number of edges.
 * @param col_bits The number of bits used to encode the destination. The row
 * information is packed into the remaining bits.
 * @param key The encoded edges (output).
 */
template <typename IdType>
__global__ void _COOEncodeEdgesKernel(
    const IdType* const row, const IdType* const col, const int64_t nnz,
    const int col_bits, IdType* const key) {
  int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;

  if (tx < nnz) {
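    // One thread per edge: pack the row ID into the high bits and the column
    // ID into the low col_bits bits.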
    key[tx] = row[tx] << col_bits | col[tx];
  }
}

/**
 * @brief Decode row and column IDs from the encoded edges.
 *
 * @tparam IdType The type the edges are encoded as.
 * @param key The encoded edges.
 * @param nnz The number of edges.
 * @param col_bits The number of bits used to store the column/dst ID.
 * @param row The row (src) IDs per edge (output).
 * @param col The col (dst) IDs per edge (output).
 */
template <typename IdType>
__global__ void _COODecodeEdgesKernel(
    const IdType* const key, const int64_t nnz, const int col_bits,
    IdType* const row, IdType* const col) {
  int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;

  if (tx < nnz) {
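    // Unpack: the high bits hold the row ID, the low col_bits bits hold the
    // column ID.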
    const IdType k = key[tx];
    row[tx] = k >> col_bits;
    col[tx] = k & ((1 << col_bits) - 1);
  }
}

template <DGLDeviceType XPU, typename IdType>
void COOSort_(COOMatrix* coo, bool sort_column) {
  hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
  const int row_bits = cuda::_NumberOfBits(coo->num_rows);

  const int64_t nnz = coo->row->shape[0];
  if (sort_column) {
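    // Sort by (row, col): pack each (row, col) pair into a single key, radix
    // sort the keys on the num_bits significant bits, then unpack the sorted
    // keys back into the row and col arrays.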
    const int col_bits = cuda::_NumberOfBits(coo->num_cols);
    const int num_bits = row_bits + col_bits;

    const int nt = 256;
    const int nb = (nnz + nt - 1) / nt;
    CHECK(static_cast<int64_t>(nb) * nt >= nnz);

    IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits);

    CUDA_KERNEL_CALL(
        _COOEncodeEdgesKernel, nb, nt, 0, stream, coo->row.Ptr<IdType>(),
        coo->col.Ptr<IdType>(), nnz, col_bits, pos.Ptr<IdType>());

    auto sorted = Sort(pos, num_bits);
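    // sorted.first holds the sorted keys; sorted.second[i] is the original
    // position of the edge now at slot i and is used below to permute the
    // edge data.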

    CUDA_KERNEL_CALL(
        _COODecodeEdgesKernel, nb, nt, 0, stream, sorted.first.Ptr<IdType>(),
        nnz, col_bits, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>());

    if (aten::COOHasData(*coo))
      coo->data = IndexSelect(coo->data, sorted.second);
    else
      coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
    coo->row_sorted = coo->col_sorted = true;
  } else {
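    // Sort by row only: radix sort the row array directly and gather col
    // (and data) with the resulting permutation.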
    const int num_bits = row_bits;

    auto sorted = Sort(coo->row, num_bits);

    coo->row = sorted.first;
    coo->col = IndexSelect(coo->col, sorted.second);

    if (aten::COOHasData(*coo))
      coo->data = IndexSelect(coo->data, sorted.second);
    else
      coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
    coo->row_sorted = true;
  }
}

template void COOSort_<kDGLCUDA, int32_t>(COOMatrix* coo, bool sort_column);
template void COOSort_<kDGLCUDA, int64_t>(COOMatrix* coo, bool sort_column);

///////////////////////////// COOIsSorted /////////////////////////////

template <typename IdType>
__global__ void _COOIsSortedKernel(
    const IdType* row, const IdType* col, int64_t nnz, int8_t* row_sorted,
    int8_t* col_sorted) {
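  // Each thread writes per-edge flags: row_sorted[i] is 1 iff row[i-1] <=
  // row[i]; col_sorted[i] is 1 iff the columns are non-decreasing within a
  // run of equal rows. The flags are reduced with cuda::AllTrue by the caller.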
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < nnz) {
    if (tx == 0) {
      row_sorted[0] = 1;
      col_sorted[0] = 1;
    } else {
      row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]);
      col_sorted[tx] =
          static_cast<int8_t>(row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]);
    }
    tx += stride_x;
  }
}

template <DGLDeviceType XPU, typename IdType>
std::pair<bool, bool> COOIsSorted(COOMatrix coo) {
  const int64_t nnz = coo.row->shape[0];
  const auto& ctx = coo.row->ctx;
  hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
  auto device = runtime::DeviceAPI::Get(ctx);
  // We allocate a workspace of 2*nnz bytes. It wastes a little bit of memory
  // but should be fine.
  int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  const int nt = cuda::FindNumThreads(nnz);
  const int nb = (nnz + nt - 1) / nt;
  CUDA_KERNEL_CALL(
      _COOIsSortedKernel, nb, nt, 0, stream, coo.row.Ptr<IdType>(),
      coo.col.Ptr<IdType>(), nnz, row_flags, col_flags);

  const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx);
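  // Column order only matters within runs of equal rows, so the column check
  // is skipped when the rows themselves are not sorted.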
  const bool col_sorted =
      row_sorted ? cuda::AllTrue(col_flags, nnz, ctx) : false;

  device->FreeWorkspace(ctx, row_flags);
  device->FreeWorkspace(ctx, col_flags);

  return {row_sorted, col_sorted};
}

template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int32_t>(COOMatrix coo);
template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int64_t>(COOMatrix coo);

}  // namespace impl
}  // namespace aten
}  // namespace dgl