/**
 *  Copyright (c) 2020 by Contributors
 * @file array/cuda/spmm.cu
 * @brief SPMM C APIs and definitions.
 */
#include <dgl/array.h>

#include <cstdlib>

#include "../../runtime/cuda/cuda_common.h"
#include "./functor.cuh"
#include "./ge_spmm.cuh"
#include "./spmm.cuh"

namespace dgl {

using namespace cuda;

namespace aten {

/**
 * @brief CUDA implementation of g-SpMM on Csr format.
 * @note Use cuSPARSE if the reduce operator is `sum` and there is
 *       no broadcast; fall back to DGL's own kernel in all other cases.
 */
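// Rough sketch of the g-SpMM semantics (assuming, as in DGL's in-edge CSR,
// that rows of `csr` correspond to destination nodes): for each row i and
// feature position k,
//   out[i, k] = reduce over entries (j, e) of row i of op(ufeat[j, k], efeat[e, k])
// where `op` is the binary message operator and `reduce` is sum/max/min.
// Feature broadcasting is described by `bcast`; for max/min, `out_aux`
// receives the arg-node/arg-edge indices used in the backward pass.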
template <int XPU, typename IdType, typename DType>
void SpMMCsr(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux) {
  bool is_scalar_efeat = efeat.NumElements() == csr.indices->shape[0];
  bool use_efeat = op != "copy_lhs";
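  // Honor the USE_DETERMINISTIC_ALG environment variable: when it is set, the
  // cuSPARSE calls below are asked to use a deterministic algorithm
  // (presumably at some cost in speed).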
  bool use_deterministic_alg_only = false;
  if (NULL != std::getenv("USE_DETERMINISTIC_ALG"))
    use_deterministic_alg_only = true;

  if (reduce == "sum") {
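    // Heuristic hint for cusparse_available(): more stored entries than
    // num_rows * num_cols typically means the graph has duplicate (parallel)
    // edges, which affects whether the cuSPARSE path is worthwhile.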
    bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols);
    if (op == "copy_lhs" && cusparse_available<DType, IdType>(more_nnz)) {
      // cusparse
      int64_t x_length = 1;
      for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i];
      CusparseCsrmm2<DType, IdType>(
          ufeat->ctx, csr, static_cast<DType*>(ufeat->data), nullptr,
          static_cast<DType*>(out->data), x_length, use_deterministic_alg_only);
    } else if (
        op == "mul" && is_scalar_efeat &&
        cusparse_available<DType, IdType>(more_nnz)) {
      // cusparse
      int64_t x_length = 1;
      for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i];
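      // When csr.data is present it maps each CSR entry back to its original
      // edge id, so the edge features must be gathered into CSR order before
      // being passed to cuSPARSE.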
      if (!IsNullArray(csr.data)) {
        efeat = IndexSelect(efeat, csr.data);
      }
      CusparseCsrmm2<DType, IdType>(
          ufeat->ctx, csr, static_cast<DType*>(ufeat->data),
          static_cast<DType*>(efeat->data), static_cast<DType*>(out->data),
          x_length, use_deterministic_alg_only);
    } else {  // general kernel
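      // SWITCH_OP maps the runtime `op` string to a compile-time functor type
      // `Op`, instantiating a dedicated kernel per binary operator.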
      SWITCH_OP(op, Op, {
        cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >(
            bcast, csr, ufeat, efeat, out, NullArray(), NullArray());
      });
    }
  } else if (reduce == "max") {
    SWITCH_OP(op, Op, {
      cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >(
          bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
    });
  } else if (reduce == "min") {
    SWITCH_OP(op, Op, {
      cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >(
          bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
    });
  } else {
    LOG(FATAL) << "Not implemented";
  }
}

/**
 * @brief CUDA implementation of g-SpMM on Coo format.
 */
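// Unlike the CSR path there is no cuSPARSE shortcut here: every reduce goes
// through DGL's own COO kernel. The trailing `true` template argument of
// Sum/Max/Min (presumably the atomic flag) is needed because multiple edges
// may update the same output row concurrently.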
template <int XPU, typename IdType, typename DType>
void SpMMCoo(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux) {
  if (reduce == "sum") {
    SWITCH_OP(op, Op, {
      cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> >(
          bcast, coo, ufeat, efeat, out, NullArray(), NullArray());
    });
  } else if (reduce == "max") {
    SWITCH_OP(op, Op, {
      cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> >(
          bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
    });
  } else if (reduce == "min") {
    SWITCH_OP(op, Op, {
      cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> >(
          bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
    });
  } else {
    LOG(FATAL) << "Not implemented";
  }
}

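// Explicit template instantiations: SpMMCsr and SpMMCoo are compiled for
// int32/int64 indices combined with half, (optionally) bfloat16, float and
// double feature types.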
template void SpMMCsr<kDGLCUDA, int32_t, __half>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCsr<kDGLCUDA, int64_t, __half>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
#if BF16_ENABLED
template void SpMMCsr<kDGLCUDA, int32_t, __nv_bfloat16>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCsr<kDGLCUDA, int64_t, __nv_bfloat16>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
#endif  // BF16_ENABLED
template void SpMMCsr<kDGLCUDA, int32_t, float>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCsr<kDGLCUDA, int64_t, float>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCsr<kDGLCUDA, int32_t, double>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCsr<kDGLCUDA, int64_t, double>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);

template void SpMMCoo<kDGLCUDA, int32_t, __half>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCoo<kDGLCUDA, int64_t, __half>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
#if BF16_ENABLED
template void SpMMCoo<kDGLCUDA, int32_t, __nv_bfloat16>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCoo<kDGLCUDA, int64_t, __nv_bfloat16>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
#endif  // BF16_ENABLED
template void SpMMCoo<kDGLCUDA, int32_t, float>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCoo<kDGLCUDA, int64_t, float>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCoo<kDGLCUDA, int32_t, double>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);
template void SpMMCoo<kDGLCUDA, int64_t, double>(
    const std::string& op, const std::string& reduce, const BcastOff& bcast,
    const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out,
    std::vector<NDArray> out_aux);

}  // namespace aten
}  // namespace dgl