/**
 *  Copyright (c) 2020 by Contributors
 * @file array/cpu/spmm.h
 * @brief SPMM CPU kernel function header.
 */
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_

#include <dgl/array.h>
#include <dgl/bcast.h>
#include <dgl/runtime/config.h>
#include <dgl/runtime/parallel_for.h>
#include <math.h>

#include <algorithm>
#include <limits>
#include <memory>
#include <vector>

#include "spmm_binary_ops.h"
#if !defined(_WIN32)
#ifdef USE_LIBXSMM
#include "spmm_blocking_libxsmm.h"
#endif  // USE_LIBXSMM
#endif  // _WIN32
namespace dgl {
namespace aten {
namespace cpu {

template <typename DType>
using AccType = typename std::conditional<
    std::is_same<DType, BFloat16>::value, float, DType>::type;
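// For BFloat16 inputs AccType resolves to float, so that reductions
// accumulate at higher precision (see the BFloat16 overload of
// SpMMSumCsrNaive below); for every other type it is the type itself.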

/**
 * @brief Naive CPU kernel of SpMM on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param X The feature on source nodes.
 * @param W The feature on edges.
 * @param O The result feature on destination nodes.
 * @note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
 */
template <typename IdType, typename DType, typename Op>
typename std::enable_if<!std::is_same<DType, BFloat16>::value, void>::type
SpMMSumCsrNaive(
    const BcastOff& bcast, const CSRMatrix& csr, const DType* X, const DType* W,
    DType* O) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  const IdType* indices = csr.indices.Ptr<IdType>();
  const IdType* edges = csr.data.Ptr<IdType>();
  int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
  runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
    for (auto rid = b; rid < e; ++rid) {
      const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
      DType* out_off = O + rid * dim;
      for (IdType j = row_start; j < row_end; ++j) {
        const IdType cid = indices[j];
        const IdType eid = has_idx ? edges[j] : j;
        for (int64_t k = 0; k < dim; ++k) {
          const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* lhs_off =
              Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
          const DType* rhs_off =
              Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
          out_off[k] += Op::Call(lhs_off, rhs_off);
        }
      }
    }
  });
}

// Naive implementation with additional accumulator, which prevents accuracy
// degradation in less precise data types, like bfloat16.
template <typename IdType, typename DType, typename Op>
typename std::enable_if<std::is_same<DType, BFloat16>::value, void>::type
SpMMSumCsrNaive(
    const BcastOff& bcast, const CSRMatrix& csr, const DType* X, const DType* W,
    DType* O) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  const IdType* indices = csr.indices.Ptr<IdType>();
  const IdType* edges = csr.data.Ptr<IdType>();
  int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
  runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
    for (auto rid = b; rid < e; ++rid) {
      const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
      DType* out_off = O + rid * dim;
      for (int64_t k = 0; k < dim; ++k) {
        AccType<DType> acc = 0.;
        for (IdType j = row_start; j < row_end; ++j) {
          const IdType cid = indices[j];
          const IdType eid = has_idx ? edges[j] : j;
          const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* lhs_off =
              Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
          const DType* rhs_off =
              Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
          acc += Op::Call(lhs_off, rhs_off);
        }
        out_off[k] += acc;
      }
    }
  });
}

/**
 * @brief CPU kernel of SpMM on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result feature on destination nodes.
 * @note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
 */
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(
    const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat,
    NDArray out) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  const IdType* indices = csr.indices.Ptr<IdType>();
  const IdType* edges = csr.data.Ptr<IdType>();
  const DType* X = ufeat.Ptr<DType>();
  const DType* W = efeat.Ptr<DType>();
  DType* O = out.Ptr<DType>();
  CHECK_NOTNULL(indptr);
  CHECK_NOTNULL(O);
  if (Op::use_lhs) {
    CHECK_NOTNULL(indices);
    CHECK_NOTNULL(X);
  }
  if (Op::use_rhs) {
    if (has_idx) CHECK_NOTNULL(edges);
    CHECK_NOTNULL(W);
  }
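  // Prefer the LIBXSMM JIT'ed kernel when it is compiled in and usable at
  // runtime; fall back to the naive kernel for broadcast ops, double
  // precision, or when LIBXSMM is disabled.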
#if !defined(_WIN32)
#ifdef USE_LIBXSMM
  const bool no_libxsmm = bcast.use_bcast ||
                          std::is_same<DType, double>::value ||
                          !dgl::runtime::Config::Global()->IsLibxsmmAvailable();
  if (!no_libxsmm) {
    SpMMSumCsrLibxsmm<IdType, DType, Op>(bcast, csr, ufeat, efeat, out);
  } else {
#endif  // USE_LIBXSMM
#endif  // _WIN32
    SpMMSumCsrNaive<IdType, DType, Op>(bcast, csr, X, W, O);
#if !defined(_WIN32)
#ifdef USE_LIBXSMM
  }
#endif  // USE_LIBXSMM
#endif  // _WIN32
}
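
// Illustrative call of the kernel above (a sketch, not code from this file;
// assumes int32 indices, float features, and the binary ops declared in
// spmm_binary_ops.h):
//
//   // out[dst] = sum over incoming edges of ufeat[src] * efeat[edge]
//   BcastOff bcast = CalcBcastOff("mul", ufeat, efeat);
//   SpMMSumCsr<int32_t, float, op::Mul<float>>(bcast, csr, ufeat, efeat, out);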

/**
 * @brief CPU kernel of SpMM on Coo format.
 * @param bcast Broadcast information.
 * @param coo The Coo matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result feature on destination nodes.
 * @note It uses an edge-parallel strategy: different threads are responsible
 *       for the computation of different edges. To avoid possible data
 *       hazards, we use atomic operators in the reduction phase.
 */
template <typename IdType, typename DType, typename Op>
typename std::enable_if<!std::is_same<DType, BFloat16>::value, void>::type
SpMMSumCoo(
    const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat,
    NDArray out) {
  const bool has_idx = !IsNullArray(coo.data);
  const IdType* row = coo.row.Ptr<IdType>();
  const IdType* col = coo.col.Ptr<IdType>();
  const IdType* edges = coo.data.Ptr<IdType>();
  const DType* X = ufeat.Ptr<DType>();
  const DType* W = efeat.Ptr<DType>();
  int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
  DType* O = out.Ptr<DType>();
  const int64_t nnz = coo.row->shape[0];
  // fill zero elements
  memset(O, 0, out.GetSize());
  // spmm
#pragma omp parallel for
  for (IdType i = 0; i < nnz; ++i) {
    const IdType rid = row[i];
    const IdType cid = col[i];
    const IdType eid = has_idx ? edges[i] : i;
    DType* out_off = O + cid * dim;
    for (int64_t k = 0; k < dim; ++k) {
      const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
      const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
      const DType* lhs_off =
          Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
      const DType* rhs_off =
          Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
      const DType val = Op::Call(lhs_off, rhs_off);
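      // Skip the atomic update when the contribution is zero: O was
      // zero-initialized above, so nothing would change.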
      if (val != 0) {
#pragma omp atomic
        out_off[k] += val;
      }
    }
  }
}

template <typename IdType, typename DType, typename Op>
typename std::enable_if<std::is_same<DType, BFloat16>::value, void>::type
SpMMSumCoo(
    const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat,
    NDArray out) {
  LOG(FATAL) << "Unsupported CPU kernel for SpMMSumCoo for BF16.";
}

/**
 * @brief CPU kernel of SpMM-Min/Max on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result feature on destination nodes.
 * @param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * @param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * @note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
 * @note The result will contain infinity for zero-degree nodes.
 */
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
    const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat,
    NDArray out, NDArray argu, NDArray arge) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* indices = static_cast<IdType*>(csr.indices->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
  CHECK_NOTNULL(indptr);
  CHECK_NOTNULL(O);
  if (Op::use_lhs) {
    CHECK_NOTNULL(indices);
    CHECK_NOTNULL(X);
    CHECK_NOTNULL(argX);
  }
  if (Op::use_rhs) {
    if (has_idx) CHECK_NOTNULL(edges);
    CHECK_NOTNULL(W);
    CHECK_NOTNULL(argW);
  }
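  // `out` is expected to arrive pre-filled with Cmp::zero (+/-infinity) by
  // the caller; rows with no incoming edges keep that value, which is why
  // the result contains infinity for zero-degree nodes.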
#if !defined(_WIN32)
#ifdef USE_LIBXSMM

  const bool no_libxsmm = bcast.use_bcast ||
                          std::is_same<DType, double>::value ||
                          !dgl::runtime::Config::Global()->IsLibxsmmAvailable();
  if (!no_libxsmm) {
    SpMMCmpCsrLibxsmm<IdType, DType, Op, Cmp>(
        bcast, csr, ufeat, efeat, out, argu, arge);
  } else {
#endif  // USE_LIBXSMM
#endif  // _WIN32

    runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
      for (auto rid = b; rid < e; ++rid) {
        const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
        DType* out_off = O + rid * dim;
        IdType* argx_off = argX + rid * dim;
        IdType* argw_off = argW + rid * dim;
        for (IdType j = row_start; j < row_end; ++j) {
          const IdType cid = indices[j];
          const IdType eid = has_idx ? edges[j] : j;
          for (int64_t k = 0; k < dim; ++k) {
            const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
            const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
            const DType* lhs_off =
                Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
            const DType* rhs_off =
                Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
            const DType val = Op::Call(lhs_off, rhs_off);
            if (Cmp::Call(out_off[k], val)) {
              out_off[k] = val;
              if (Op::use_lhs) argx_off[k] = cid;
              if (Op::use_rhs) argw_off[k] = eid;
            }
          }
        }
      }
    });
#if !defined(_WIN32)
#ifdef USE_LIBXSMM
  }
#endif  // USE_LIBXSMM
#endif  // _WIN32
}
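
// Illustrative call of the kernel above (a sketch, not code from this file;
// assumes float features and the CopyLhs/Max ops from spmm_binary_ops.h, with
// `out` pre-filled with op::Max<float>::zero):
//
//   // out[dst] = max over incoming edges of ufeat[src]; argu records src ids
//   SpMMCmpCsr<int32_t, float, op::CopyLhs<float>, op::Max<float>>(
//       bcast, csr, ufeat, efeat, out, argu, arge);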

/**
 * @brief CPU kernel of SpMM-Min/Max on Csr format for heterogeneous graphs.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result feature on destination nodes.
 * @param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * @param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * @param argu_ntype Node type of the arg-Min/Max on source nodes, which refers
 *        to the source node types corresponding to the minimum/maximum values
 *        of the reduction result on destination nodes. It's useful in
 *        computing gradients of the Min/Max reducer.
 * @param arge_etype Edge type of the arg-Min/Max on edges, which refers to the
 *        edge types corresponding to the minimum/maximum values of the
 *        reduction result on destination nodes. It's useful in computing
 *        gradients of the Min/Max reducer.
 * @param ntype Node type of the source nodes of this edge type.
 * @param etype Edge type.
 */
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrHetero(
    const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat,
    NDArray out, NDArray argu, NDArray arge, NDArray argu_ntype,
    NDArray arge_etype, const int ntype, const int etype) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* indices = static_cast<IdType*>(csr.indices->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
  IdType* argX_ntype =
      Op::use_lhs ? static_cast<IdType*>(argu_ntype->data) : nullptr;
  IdType* argW_etype =
      Op::use_rhs ? static_cast<IdType*>(arge_etype->data) : nullptr;
  CHECK_NOTNULL(indptr);
  CHECK_NOTNULL(O);
  if (Op::use_lhs) {
    CHECK_NOTNULL(indices);
    CHECK_NOTNULL(X);
    CHECK_NOTNULL(argX);
  }
  if (Op::use_rhs) {
    if (has_idx) CHECK_NOTNULL(edges);
    CHECK_NOTNULL(W);
    CHECK_NOTNULL(argW);
  }
  // TODO(Israt): Use LIBXSMM. Homogeneous graph uses LIBXSMM when enabled.
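  // In addition to the winning source node / edge ids, this kernel records
  // which node type and edge type produced the current extremum, so that the
  // heterogeneous backward pass can route gradients to the right relation.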
  runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
    for (auto rid = b; rid < e; ++rid) {
      const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
      DType* out_off = O + rid * dim;
      IdType* argx_off = argX + rid * dim;
      IdType* argw_off = argW + rid * dim;
      IdType* argx_ntype = argX_ntype + rid * dim;
      IdType* argw_etype = argW_etype + rid * dim;
      for (IdType j = row_start; j < row_end; ++j) {
        const IdType cid = indices[j];
        const IdType eid = has_idx ? edges[j] : j;
        for (int64_t k = 0; k < dim; ++k) {
          const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* lhs_off =
              Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
          const DType* rhs_off =
              Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
          const DType val = Op::Call(lhs_off, rhs_off);
          if (Cmp::Call(out_off[k], val)) {
            out_off[k] = val;
            if (Op::use_lhs) {
              argx_off[k] = cid;
              argx_ntype[k] = ntype;
            }
            if (Op::use_rhs) {
              argw_off[k] = eid;
              argw_etype[k] = etype;
            }
          }
        }
      }
    }
  });
}

/**
 * @brief CPU kernel of SpMM-Min/Max on Coo format.
 * @param bcast Broadcast information.
 * @param coo The Coo matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result feature on destination nodes.
 * @param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * @param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * @note It uses an edge-parallel strategy: different threads are responsible
 *       for the computation of different edges. To avoid possible data
 *       hazards, we use a critical section in the reduction phase.
 * @note The result will contain infinity for zero-degree nodes.
 */
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
    const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat,
    NDArray out, NDArray argu, NDArray arge) {
  const bool has_idx = !IsNullArray(coo.data);
  const IdType* row = static_cast<IdType*>(coo.row->data);
  const IdType* col = static_cast<IdType*>(coo.col->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(coo.data->data) : nullptr;
  const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
  const int64_t nnz = coo.row->shape[0];
  // fill zero elements
  std::fill(O, O + out.NumElements(), Cmp::zero);
  // spmm
#pragma omp parallel for
  for (IdType i = 0; i < nnz; ++i) {
    const IdType rid = row[i];
    const IdType cid = col[i];
    const IdType eid = has_idx ? edges[i] : i;
    DType* out_off = O + cid * dim;
    IdType* argx_off = Op::use_lhs ? argX + cid * dim : nullptr;
    IdType* argw_off = Op::use_rhs ? argW + cid * dim : nullptr;
    for (int64_t k = 0; k < dim; ++k) {
      const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
      const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
      const DType* lhs_off =
          Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
      const DType* rhs_off =
          Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
      const DType val = Op::Call(lhs_off, rhs_off);
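      // A critical section (rather than a per-element atomic) is required
      // here because the value and its arg indices must be updated together.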
#pragma omp critical
      if (Cmp::Call(out_off[k], val)) {
        out_off[k] = val;
        if (Op::use_lhs) argx_off[k] = rid;
        if (Op::use_rhs) argw_off[k] = eid;
      }
    }
  }
}

/**
 * @brief CPU kernel of Edge_softmax_csr_forward on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param ufeat The feature on source nodes.
 * @param efeat The feature on edges.
 * @param out The result of edge_softmax_forward.
 */
template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_forward(
    const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat,
    NDArray out) {
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len, rhs_dim = bcast.rhs_len;
  runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
    for (auto rid = b; rid < e; ++rid) {
      const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
      std::vector<AccType<DType>> data_e(row_end - row_start, 0);
      std::vector<IdType> num(row_end - row_start, 0);
      for (int64_t k = 0; k < dim; ++k) {
        DType max_v = -std::numeric_limits<DType>::infinity();
        for (IdType j = row_start; j < row_end; ++j) {
          const IdType eid = has_idx ? edges[j] : j;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* rhs_off =
              Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
          data_e[j - row_start] = *rhs_off;
          num[j - row_start] = eid * rhs_dim + rhs_add;
          max_v = std::max<DType>(max_v, (*rhs_off));
        }
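        // Subtract the per-row maximum before exponentiating: the usual
        // max-shift trick that keeps the softmax numerically stable.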
        DType exp_sum = 0;
        for (auto& element : data_e) {
          element -= max_v;
          element = std::exp(element);
          exp_sum += element;
        }
        for (int i = 0; i < row_end - row_start; i++) {
          out.Ptr<DType>()[num[i]] = data_e[i] / exp_sum;
        }
      }
    }
  });
}

/**
 * @brief CPU kernel of Edge_softmax_csr_backward on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
 * @param out The result of forward.
 * @param sds The result of gradient * out.
 * @param back_out The result of edge_softmax_backward.
 */
template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_backward(
    const BcastOff& bcast, const CSRMatrix& csr, NDArray out, NDArray sds,
    NDArray back_out) {
  typedef typename std::conditional<
      std::is_same<DType, BFloat16>::value, float, DType>::type AccType;
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* edges =
      has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* W_out = Op::use_rhs ? static_cast<DType*>(out->data) : nullptr;
  const DType* W_sds = Op::use_rhs ? static_cast<DType*>(sds->data) : nullptr;
  const int64_t dim = bcast.out_len, rhs_dim = bcast.rhs_len;
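  // Softmax backward per destination row: with y = softmax(x) and
  // s = grad * y (the `sds` input), the gradient is dx_e = s_e - y_e * sum_j
  // s_j; `sum_sds` below accumulates sum_j s_j for each position k.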
  runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
    for (auto rid = b; rid < e; ++rid) {
      const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
      for (int64_t k = 0; k < dim; ++k) {
        AccType sum_sds = 0;
        for (IdType j = row_start; j < row_end; ++j) {
          const IdType eid = has_idx ? edges[j] : j;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* rhs_off_sds =
              Op::use_rhs ? W_sds + eid * rhs_dim + rhs_add : nullptr;
          sum_sds += (*rhs_off_sds);
        }
        for (IdType j = row_start; j < row_end; ++j) {
          const IdType eid = has_idx ? edges[j] : j;
          const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
          const DType* rhs_off_out =
              Op::use_rhs ? W_out + eid * rhs_dim + rhs_add : nullptr;
          const DType* rhs_off_sds =
              Op::use_rhs ? W_sds + eid * rhs_dim + rhs_add : nullptr;
          back_out.Ptr<DType>()[eid * rhs_dim + rhs_add] =
              (*rhs_off_sds) - sum_sds * (*rhs_off_out);
        }
      }
    }
  });
}

}  // namespace cpu
}  // namespace aten
}  // namespace dgl

#endif  // DGL_ARRAY_CPU_SPMM_H_