Unverified Commit 619d735d authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
 * \file kernel/cpu/gather_mm.cc * @file kernel/cpu/gather_mm.cc
* \brief GatherMM C APIs and definitions. * @brief GatherMM C APIs and definitions.
*/ */
#include "./gather_mm.h" #include "./gather_mm.h"
#include <dgl/array.h> #include <dgl/array.h>
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
namespace dgl { namespace dgl {
namespace aten { namespace aten {
/*! \brief Generalized SegmentMM. */ /*! @brief Generalized SegmentMM. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SegmentMM(const NDArray A, void SegmentMM(const NDArray A,
const NDArray B, const NDArray B,
...@@ -27,7 +27,7 @@ void SegmentMMBackwardB(const NDArray A, ...@@ -27,7 +27,7 @@ void SegmentMMBackwardB(const NDArray A,
LOG(FATAL) << "Unsupported CPU kernel for SegmentMMBackwardB."; LOG(FATAL) << "Unsupported CPU kernel for SegmentMMBackwardB.";
} }
/*! \brief Generalized GatherMM. */ /*! @brief Generalized GatherMM. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void GatherMM(const NDArray A, void GatherMM(const NDArray A,
const NDArray B, const NDArray B,
...@@ -37,7 +37,7 @@ void GatherMM(const NDArray A, ...@@ -37,7 +37,7 @@ void GatherMM(const NDArray A,
LOG(FATAL) << "Unsupported CPU kernel for GatherMM."; LOG(FATAL) << "Unsupported CPU kernel for GatherMM.";
} }
/*! \brief Generalized GatherMM_scatter. */ /*! @brief Generalized GatherMM_scatter. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void GatherMMScatter(const NDArray A, void GatherMMScatter(const NDArray A,
const NDArray B, const NDArray B,
......
/*! /*!
* Copyright (c) 2022 by Contributors * Copyright (c) 2022 by Contributors
* \file array/cpu/gather_mm.h * @file array/cpu/gather_mm.h
* \brief GATHER_MM CPU kernel function header. * @brief GATHER_MM CPU kernel function header.
*/ */
#ifndef DGL_ARRAY_CPU_GATHER_MM_H_ #ifndef DGL_ARRAY_CPU_GATHER_MM_H_
#define DGL_ARRAY_CPU_GATHER_MM_H_ #define DGL_ARRAY_CPU_GATHER_MM_H_
...@@ -46,15 +46,15 @@ void matmul( ...@@ -46,15 +46,15 @@ void matmul(
} }
/*! /*!
* \brief CPU kernel of Gather_mm. The input matrix A is expected to be * @brief CPU kernel of Gather_mm. The input matrix A is expected to be
* sorted according to relation type. * sorted according to relation type.
* \param A The input dense matrix of dimension m x k * @param A The input dense matrix of dimension m x k
* \param B The input dense matrix of dimension k x n * @param B The input dense matrix of dimension k x n
 * \param C The output dense matrix of dimension m x n * @param C The output dense matrix of dimension m x n
* \param A_dim1_per_rel The number of rows in each relation in A * @param A_dim1_per_rel The number of rows in each relation in A
* \param B_dim1_per_rel The number of rows in each relation in B * @param B_dim1_per_rel The number of rows in each relation in B
* \param a_trans Matrix A to be transposed * @param a_trans Matrix A to be transposed
* \param b_trans Matrix B to be transposed * @param b_trans Matrix B to be transposed
*/ */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void gatherMM_SortedEtype( void gatherMM_SortedEtype(
......
/*! /*!
* Copyright (c) 2021 by Contributors * Copyright (c) 2021 by Contributors
* \file array/cpu/negative_sampling.cc * @file array/cpu/negative_sampling.cc
* \brief Uniform negative sampling on CSR. * @brief Uniform negative sampling on CSR.
*/ */
#include <dgl/array.h> #include <dgl/array.h>
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_pick.h * @file array/cpu/rowwise_pick.h
* \brief Template implementation for rowwise pick operators. * @brief Template implementation for rowwise pick operators.
*/ */
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_ #ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_ #define DGL_ARRAY_CPU_ROWWISE_PICK_H_
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_sampling.cc * @file array/cpu/rowwise_sampling.cc
* \brief rowwise sampling * @brief rowwise sampling
*/ */
#include <dgl/random.h> #include <dgl/random.h>
#include <numeric> #include <numeric>
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_topk.cc * @file array/cpu/rowwise_topk.cc
* \brief rowwise topk * @brief rowwise topk
*/ */
#include <numeric> #include <numeric>
#include <algorithm> #include <algorithm>
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file aten/cpu/sddmm.cc * @file aten/cpu/sddmm.cc
* \brief SDDMM C APIs and definitions. * @brief SDDMM C APIs and definitions.
*/ */
#include "./sddmm.h" #include "./sddmm.h"
#include <dgl/array.h> #include <dgl/array.h>
...@@ -42,7 +42,7 @@ namespace aten { ...@@ -42,7 +42,7 @@ namespace aten {
} while (0) } while (0)
/*! \brief Generalized SDDMM on Csr format. */ /*! @brief Generalized SDDMM on Csr format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SDDMMCsr(const std::string& op, void SDDMMCsr(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -59,7 +59,7 @@ void SDDMMCsr(const std::string& op, ...@@ -59,7 +59,7 @@ void SDDMMCsr(const std::string& op,
}); });
} }
/*! \brief Generalized SDDMM on Csr format with Heterograph support. */ /*! @brief Generalized SDDMM on Csr format with Heterograph support. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SDDMMCsrHetero(const std::string& op, void SDDMMCsrHetero(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -131,7 +131,7 @@ template void SDDMMCsrHetero<kDGLCPU, int64_t, double>( ...@@ -131,7 +131,7 @@ template void SDDMMCsrHetero<kDGLCPU, int64_t, double>(
const std::vector<dgl_type_t>& in_eid, const std::vector<dgl_type_t>& in_eid,
const std::vector<dgl_type_t>& out_eid); const std::vector<dgl_type_t>& out_eid);
/*! \brief Generalized SDDMM on Coo format. */ /*! @brief Generalized SDDMM on Coo format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SDDMMCoo(const std::string& op, void SDDMMCoo(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -148,7 +148,7 @@ void SDDMMCoo(const std::string& op, ...@@ -148,7 +148,7 @@ void SDDMMCoo(const std::string& op,
}); });
} }
/*! \brief Generalized SDDMM on Coo format with Heterograph support. */ /*! @brief Generalized SDDMM on Coo format with Heterograph support. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SDDMMCooHetero(const std::string& op, void SDDMMCooHetero(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/sddmm.h * @file array/cpu/sddmm.h
* \brief SDDMM CPU kernel function header. * @brief SDDMM CPU kernel function header.
*/ */
#ifndef DGL_ARRAY_CPU_SDDMM_H_ #ifndef DGL_ARRAY_CPU_SDDMM_H_
#define DGL_ARRAY_CPU_SDDMM_H_ #define DGL_ARRAY_CPU_SDDMM_H_
...@@ -17,13 +17,13 @@ namespace aten { ...@@ -17,13 +17,13 @@ namespace aten {
namespace cpu { namespace cpu {
/*! /*!
* \brief CPU kernel of g-SDDMM on Csr format. * @brief CPU kernel of g-SDDMM on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param lhs The left hand side operand feature. * @param lhs The left hand side operand feature.
 * \param rhs The right hand side operand feature. * @param rhs The right hand side operand feature.
* \param out The result feature on edges. * @param out The result feature on edges.
* \note it uses node parallel strategy, different threads are responsible * @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. * for the computation of different nodes.
*/ */
template < template <
...@@ -69,13 +69,13 @@ void SDDMMCsr( ...@@ -69,13 +69,13 @@ void SDDMMCsr(
} }
/*! /*!
* \brief CPU kernel of g-SDDMM on Coo format. * @brief CPU kernel of g-SDDMM on Coo format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param coo The COO matrix. * @param coo The COO matrix.
* \param lhs The left hand side operand feature. * @param lhs The left hand side operand feature.
 * \param rhs The right hand side operand feature. * @param rhs The right hand side operand feature.
* \param out The result feature on edges. * @param out The result feature on edges.
* \note it uses edge parallel strategy, different threads are responsible * @note it uses edge parallel strategy, different threads are responsible
* for the computation of different edges. * for the computation of different edges.
*/ */
template < template <
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file kernel/cpu/segment_reduce.cc * @file kernel/cpu/segment_reduce.cc
* \brief Segment reduce C APIs and definitions. * @brief Segment reduce C APIs and definitions.
*/ */
#include "./segment_reduce.h" #include "./segment_reduce.h"
#include <dgl/array.h> #include <dgl/array.h>
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
namespace dgl { namespace dgl {
namespace aten { namespace aten {
/*! \brief Segment Reduce operator. */ /*! @brief Segment Reduce operator. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SegmentReduce( void SegmentReduce(
const std::string& op, const std::string& op,
...@@ -34,7 +34,7 @@ void SegmentReduce( ...@@ -34,7 +34,7 @@ void SegmentReduce(
} }
} }
/*! \brief Scatter Add.*/ /*! @brief Scatter Add.*/
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void ScatterAdd(NDArray feat, void ScatterAdd(NDArray feat,
NDArray idx, NDArray idx,
...@@ -42,7 +42,7 @@ void ScatterAdd(NDArray feat, ...@@ -42,7 +42,7 @@ void ScatterAdd(NDArray feat,
cpu::ScatterAdd<IdType, DType>(feat, idx, out); cpu::ScatterAdd<IdType, DType>(feat, idx, out);
} }
/*! \brief Update gradients for reduce operator max/min on heterogeneous graph.*/ /*! @brief Update gradients for reduce operator max/min on heterogeneous graph.*/
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void UpdateGradMinMax_hetero(const HeteroGraphPtr& g, void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
const std::string& op, const std::string& op,
...@@ -53,7 +53,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g, ...@@ -53,7 +53,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
cpu::UpdateGradMinMax_hetero<IdType, DType>(g, op, feat, idx, idx_etype, out); cpu::UpdateGradMinMax_hetero<IdType, DType>(g, op, feat, idx, idx_etype, out);
} }
/*! \brief Backward function of segment cmp.*/ /*! @brief Backward function of segment cmp.*/
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void BackwardSegmentCmp( void BackwardSegmentCmp(
NDArray feat, NDArray feat,
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
 * \file array/cpu/segment_reduce.h * @file array/cpu/segment_reduce.h
* \brief Segment reduce kernel function header. * @brief Segment reduce kernel function header.
*/ */
#ifndef DGL_ARRAY_CPU_SEGMENT_REDUCE_H_ #ifndef DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
#define DGL_ARRAY_CPU_SEGMENT_REDUCE_H_ #define DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
...@@ -18,10 +18,10 @@ namespace aten { ...@@ -18,10 +18,10 @@ namespace aten {
namespace cpu { namespace cpu {
/*! /*!
* \brief CPU kernel of segment sum. * @brief CPU kernel of segment sum.
* \param feat The input tensor. * @param feat The input tensor.
* \param offsets The offset tensor storing the ranges of segments. * @param offsets The offset tensor storing the ranges of segments.
* \param out The output tensor. * @param out The output tensor.
*/ */
template <typename IdType, typename DType> template <typename IdType, typename DType>
void SegmentSum(NDArray feat, NDArray offsets, NDArray out) { void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
...@@ -43,11 +43,11 @@ void SegmentSum(NDArray feat, NDArray offsets, NDArray out) { ...@@ -43,11 +43,11 @@ void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
} }
/*! /*!
* \brief CPU kernel of segment min/max. * @brief CPU kernel of segment min/max.
* \param feat The input tensor. * @param feat The input tensor.
* \param offsets The offset tensor storing the ranges of segments. * @param offsets The offset tensor storing the ranges of segments.
* \param out The output tensor. * @param out The output tensor.
* \param arg An auxiliary tensor storing the argmin/max information * @param arg An auxiliary tensor storing the argmin/max information
* used in backward phase. * used in backward phase.
*/ */
template <typename IdType, typename DType, typename Cmp> template <typename IdType, typename DType, typename Cmp>
...@@ -77,11 +77,11 @@ void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) { ...@@ -77,11 +77,11 @@ void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) {
} }
/*! /*!
* \brief CPU kernel of Scatter Add (on first dimension) operator. * @brief CPU kernel of Scatter Add (on first dimension) operator.
* \note math equation: out[idx[i], *] += feat[i, *] * @note math equation: out[idx[i], *] += feat[i, *]
* \param feat The input tensor. * @param feat The input tensor.
* \param idx The indices tensor. * @param idx The indices tensor.
* \param out The output tensor. * @param out The output tensor.
*/ */
template <typename IdType, typename DType> template <typename IdType, typename DType>
void ScatterAdd(NDArray feat, NDArray idx, NDArray out) { void ScatterAdd(NDArray feat, NDArray idx, NDArray out) {
...@@ -102,13 +102,13 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) { ...@@ -102,13 +102,13 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) {
} }
/*! /*!
* \brief CPU kernel to update gradients for reduce op max/min * @brief CPU kernel to update gradients for reduce op max/min
* \param graph The input heterogeneous graph. * @param graph The input heterogeneous graph.
 * \param op The binary operator, could be `copy_u`, `copy_e`. * @param op The binary operator, could be `copy_u`, `copy_e`.
* \param list_feat List of the input tensors. * @param list_feat List of the input tensors.
* \param list_idx List of the indices tensors. * @param list_idx List of the indices tensors.
* \param list_idx_etype List of the node- or edge-type tensors. * @param list_idx_etype List of the node- or edge-type tensors.
* \param list_out List of the output tensors. * @param list_out List of the output tensors.
*/ */
template <typename IdType, typename DType> template <typename IdType, typename DType>
void UpdateGradMinMax_hetero( void UpdateGradMinMax_hetero(
...@@ -160,11 +160,11 @@ void UpdateGradMinMax_hetero( ...@@ -160,11 +160,11 @@ void UpdateGradMinMax_hetero(
} }
/*! /*!
* \brief CPU kernel of backward phase of segment min/max. * @brief CPU kernel of backward phase of segment min/max.
* \note math equation: out[arg[i, k], k] = feat[i, k] * @note math equation: out[arg[i, k], k] = feat[i, k]
* \param feat The input tensor. * @param feat The input tensor.
* \param arg The argmin/argmax tensor. * @param arg The argmin/argmax tensor.
* \param out The output tensor. * @param out The output tensor.
*/ */
template <typename IdType, typename DType> template <typename IdType, typename DType>
void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) {
......
/*! /*!
* Copyright (c) 2019 by Contributors * Copyright (c) 2019 by Contributors
* \file array/cpu/spmat_op_impl.cc * @file array/cpu/spmat_op_impl.cc
* \brief CPU implementation of COO sparse matrix operators * @brief CPU implementation of COO sparse matrix operators
*/ */
#include <dgl/runtime/parallel_for.h> #include <dgl/runtime/parallel_for.h>
#include <dmlc/omp.h> #include <dmlc/omp.h>
......
/*! /*!
* Copyright (c) 2019 by Contributors * Copyright (c) 2019 by Contributors
* \file array/cpu/spmat_op_impl_csr.cc * @file array/cpu/spmat_op_impl_csr.cc
* \brief CSR matrix operator CPU implementation * @brief CSR matrix operator CPU implementation
*/ */
#include <dgl/array.h> #include <dgl/array.h>
#include <dgl/runtime/parallel_for.h> #include <dgl/runtime/parallel_for.h>
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file kernel/cpu/spmm.cc * @file kernel/cpu/spmm.cc
* \brief SPMM C APIs and definitions. * @brief SPMM C APIs and definitions.
*/ */
#include "./spmm.h" #include "./spmm.h"
#include <dgl/array.h> #include <dgl/array.h>
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
namespace dgl { namespace dgl {
namespace aten { namespace aten {
/*! \brief Generalized SpMM on Csr format. */ /*! @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SpMMCsr(const std::string& op, const std::string& reduce, void SpMMCsr(const std::string& op, const std::string& reduce,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -41,7 +41,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce, ...@@ -41,7 +41,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce,
} }
} }
/*! \brief Generalized SpMM on Csr format. */ /*! @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SpMMCsrHetero(const std::string& op, const std::string& reduce, void SpMMCsrHetero(const std::string& op, const std::string& reduce,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -159,7 +159,7 @@ template void SpMMCsrHetero<kDGLCPU, int64_t, double>( ...@@ -159,7 +159,7 @@ template void SpMMCsrHetero<kDGLCPU, int64_t, double>(
const std::vector<dgl_type_t>& ufeat_node_tids, const std::vector<dgl_type_t>& ufeat_node_tids,
const std::vector<dgl_type_t>& out_node_tids); const std::vector<dgl_type_t>& out_node_tids);
/*! \brief Edge_softmax_csr forward op on Csr format. */ /*! @brief Edge_softmax_csr forward op on Csr format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_forward(const std::string& op, void Edge_softmax_csr_forward(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -172,7 +172,7 @@ void Edge_softmax_csr_forward(const std::string& op, ...@@ -172,7 +172,7 @@ void Edge_softmax_csr_forward(const std::string& op,
}); });
} }
/*! \brief Edge_softmax_csr backward op on Csr format. */ /*! @brief Edge_softmax_csr backward op on Csr format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_backward(const std::string& op, void Edge_softmax_csr_backward(const std::string& op,
const BcastOff& bcast, const BcastOff& bcast,
...@@ -219,7 +219,7 @@ template void Edge_softmax_csr_backward<kDGLCPU, int64_t, double>( ...@@ -219,7 +219,7 @@ template void Edge_softmax_csr_backward<kDGLCPU, int64_t, double>(
const BcastOff& bcast, const CSRMatrix& csr, const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out); NDArray ufeat, NDArray efeat, NDArray out);
/*! \brief Generalized SpMM on Coo format. */ /*! @brief Generalized SpMM on Coo format. */
template <int XPU, typename IdType, typename DType> template <int XPU, typename IdType, typename DType>
void SpMMCoo(const std::string& op, const std::string& reduce, void SpMMCoo(const std::string& op, const std::string& reduce,
const BcastOff& bcast, const BcastOff& bcast,
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h * @file array/cpu/spmm.h
* \brief SPMM CPU kernel function header. * @brief SPMM CPU kernel function header.
*/ */
#ifndef DGL_ARRAY_CPU_SPMM_H_ #ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_ #define DGL_ARRAY_CPU_SPMM_H_
...@@ -33,14 +33,14 @@ namespace cpu { ...@@ -33,14 +33,14 @@ namespace cpu {
#if !defined(_WIN32) #if !defined(_WIN32)
#ifdef USE_AVX #ifdef USE_AVX
/*! /*!
* \brief CPU kernel of SpMM on Csr format using Xbyak. * @brief CPU kernel of SpMM on Csr format using Xbyak.
* \param cpu_spec JIT'ed kernel * @param cpu_spec JIT'ed kernel
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param X The feature on source nodes. * @param X The feature on source nodes.
* \param W The feature on edges. * @param W The feature on edges.
* \param O The result feature on destination nodes. * @param O The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible * @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. For each edge, it uses the * for the computation of different nodes. For each edge, it uses the
* JIT'ed kernel. * JIT'ed kernel.
*/ */
...@@ -70,14 +70,14 @@ void SpMMSumCsrXbyak( ...@@ -70,14 +70,14 @@ void SpMMSumCsrXbyak(
#endif // _WIN32 #endif // _WIN32
/*! /*!
* \brief Naive CPU kernel of SpMM on Csr format. * @brief Naive CPU kernel of SpMM on Csr format.
* \param cpu_spec JIT'ed kernel * @param cpu_spec JIT'ed kernel
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param X The feature on source nodes. * @param X The feature on source nodes.
* \param W The feature on edges. * @param W The feature on edges.
* \param O The result feature on destination nodes. * @param O The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible * @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. * for the computation of different nodes.
*/ */
template <typename IdType, typename DType, typename Op> template <typename IdType, typename DType, typename Op>
...@@ -111,13 +111,13 @@ void SpMMSumCsrNaive( ...@@ -111,13 +111,13 @@ void SpMMSumCsrNaive(
} }
/*! /*!
* \brief CPU kernel of SpMM on Csr format. * @brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible * @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. * for the computation of different nodes.
*/ */
template <typename IdType, typename DType, typename Op> template <typename IdType, typename DType, typename Op>
...@@ -177,13 +177,13 @@ void SpMMSumCsr( ...@@ -177,13 +177,13 @@ void SpMMSumCsr(
} }
/*! /*!
* \brief CPU kernel of SpMM on Coo format. * @brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param coo The Coo matrix. * @param coo The Coo matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible * @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. To avoid possible data hazard, * for the computation of different nodes. To avoid possible data hazard,
* we use atomic operators in the reduction phase. * we use atomic operators in the reduction phase.
*/ */
...@@ -226,23 +226,23 @@ void SpMMSumCoo( ...@@ -226,23 +226,23 @@ void SpMMSumCoo(
} }
/*! /*!
* \brief CPU kernel of SpMM-Min/Max on Csr format. * @brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices * @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on * correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices * @param arge Arg-Min/Max on edges. which refers the source node indices
correspond to the minimum/maximum values of reduction result on correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \note It uses node parallel strategy, different threads are responsible for * @note It uses node parallel strategy, different threads are responsible for
* the computation of different nodes. * the computation of different nodes.
* \note The result will contain infinity for zero-degree nodes. * @note The result will contain infinity for zero-degree nodes.
*/ */
template <typename IdType, typename DType, typename Op, typename Cmp> template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr( void SpMMCmpCsr(
...@@ -323,30 +323,30 @@ void SpMMCmpCsr( ...@@ -323,30 +323,30 @@ void SpMMCmpCsr(
} }
/*! /*!
* \brief CPU kernel of SpMM-Min/Max on Csr format. * @brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices * @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on * correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices * @param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on * correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \param argu_ntype Node type of the arg-Min/Max on source nodes, which refers * @param argu_ntype Node type of the arg-Min/Max on source nodes, which refers
* the source node types correspond to the minimum/maximum values of * the source node types correspond to the minimum/maximum values of
* reduction result on destination nodes. It's useful in computing * reduction result on destination nodes. It's useful in computing
* gradients of Min/Max reducer. * gradients of Min/Max reducer.
* \param arge_etype Edge-type of the arg-Min/Max on edges. which refers the * @param arge_etype Edge-type of the arg-Min/Max on edges. which refers the
* source node indices correspond to the minimum/maximum values of * source node indices correspond to the minimum/maximum values of
* reduction result on destination nodes. It's useful in computing * reduction result on destination nodes. It's useful in computing
* gradients of Min/Max reducer. * gradients of Min/Max reducer.
* \param src_type Node type of the source nodes of an etype * @param src_type Node type of the source nodes of an etype
* \param etype Edge type * @param etype Edge type
*/ */
template <typename IdType, typename DType, typename Op, typename Cmp> template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrHetero( void SpMMCmpCsrHetero(
...@@ -419,24 +419,24 @@ void SpMMCmpCsrHetero( ...@@ -419,24 +419,24 @@ void SpMMCmpCsrHetero(
} }
/*! /*!
* \brief CPU kernel of SpMM-Min/Max on Coo format. * @brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param coo The Coo matrix. * @param coo The Coo matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices * @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on * correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices * @param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on * correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max * destination nodes. It's useful in computing gradients of Min/Max
* reducer. * reducer.
* \note it uses node parallel strategy, different threads are responsible for * @note it uses node parallel strategy, different threads are responsible for
* the computation of different nodes. To avoid possible data hazard, we * the computation of different nodes. To avoid possible data hazard, we
* use atomic operators in the reduction phase. * use atomic operators in the reduction phase.
* \note The result will contain infinity for zero-degree nodes. * @note The result will contain infinity for zero-degree nodes.
*/ */
template <typename IdType, typename DType, typename Op, typename Cmp> template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo( void SpMMCmpCoo(
...@@ -485,12 +485,12 @@ void SpMMCmpCoo( ...@@ -485,12 +485,12 @@ void SpMMCmpCoo(
} }
/*! /*!
* \brief CPU kernel of Edge_softmax_csr_forward on Csr format. * @brief CPU kernel of Edge_softmax_csr_forward on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result of edge_softmax_forward. * @param out The result of edge_softmax_forward.
*/ */
template <typename IdType, typename DType, typename Op> template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_forward( void Edge_softmax_csr_forward(
...@@ -533,12 +533,12 @@ void Edge_softmax_csr_forward( ...@@ -533,12 +533,12 @@ void Edge_softmax_csr_forward(
} }
/*! /*!
* \brief CPU kernel of Edge_softmax_csr_backward on Csr format. * @brief CPU kernel of Edge_softmax_csr_backward on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param out The result of forward. * @param out The result of forward.
* \param sds The result of gradient * out. * @param sds The result of gradient * out.
* \param back_out The result of edge_softmax_backward. * @param back_out The result of edge_softmax_backward.
*/ */
template <typename IdType, typename DType, typename Op> template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_backward( void Edge_softmax_csr_backward(
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/spmm_binary_ops.h * @file array/cpu/spmm_binary_ops.h
* \brief SPMM CPU Binary ops. * @brief SPMM CPU Binary ops.
*/ */
#ifndef DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_ #ifndef DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_
#define DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_ #define DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_
......
/*! /*!
* Copyright (c) 2021 Intel Corporation * Copyright (c) 2021 Intel Corporation
* \file array/cpu/spmm.h * @file array/cpu/spmm.h
* \brief SPMM CPU kernel function header. * @brief SPMM CPU kernel function header.
* \author Sanchit Misra <sanchit.misra@intel.com>, * @author Sanchit Misra <sanchit.misra@intel.com>,
* Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>, * Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>,
* Vasimuddin Md <vasimuddin.md@intel.com>, * Vasimuddin Md <vasimuddin.md@intel.com>,
* Sasikanth Avancha <sasikanth.avancha@intel.com> * Sasikanth Avancha <sasikanth.avancha@intel.com>
...@@ -49,19 +49,19 @@ int32_t GetLLCSize() { ...@@ -49,19 +49,19 @@ int32_t GetLLCSize() {
} }
/*! /*!
* \brief Tile the CSR matrix to roughly make sure that the column tiles and * @brief Tile the CSR matrix to roughly make sure that the column tiles and
* corresponding neighbor features fit into LLC and the row tiles * corresponding neighbor features fit into LLC and the row tiles
* are assigned to OMP threads. * are assigned to OMP threads.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param block_csr_array The array containing csr matrices of all blocks. * @param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency * @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix. * matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency * @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix. * matrix.
* \param M_block_size block size along the rows of adjacency matrix. * @param M_block_size block size along the rows of adjacency matrix.
* \param K_block_size block size along the columns of adjacency matrix. * @param K_block_size block size along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs. * @param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs. * @param use_rhs Whether to use rhs.
*/ */
template <typename IdType> template <typename IdType>
inline void SpMMCreateBlocks( inline void SpMMCreateBlocks(
...@@ -166,12 +166,12 @@ inline void SpMMCreateBlocks( ...@@ -166,12 +166,12 @@ inline void SpMMCreateBlocks(
} }
/*! /*!
* \brief Create libxsmm kernel. * @brief Create libxsmm kernel.
* \param has_idx For the edge features, are there indices available. * @param has_idx For the edge features, are there indices available.
* \param N Feature size. * @param N Feature size.
* \param redop_flag Flag specifying the reduction operation. * @param redop_flag Flag specifying the reduction operation.
* \param is_cmp Is the reduction operation a compare operation. * @param is_cmp Is the reduction operation a compare operation.
* \note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel. * @note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel.
* Given a node u, the kernel performs an elementwise "Op" on the * Given a node u, the kernel performs an elementwise "Op" on the
* features of the neighbors and/or the edges incident on u. * features of the neighbors and/or the edges incident on u.
* Subsequently, it performs an elementwise "Redop" on all such * Subsequently, it performs an elementwise "Redop" on all such
...@@ -270,19 +270,19 @@ inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel( ...@@ -270,19 +270,19 @@ inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel(
} }
/*! /*!
* \brief Use libxsmm to perform SpMM-Sum on all blocks. * @brief Use libxsmm to perform SpMM-Sum on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks. * @param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes. * @param B The feature on source nodes.
* \param E The feature on edges. * @param E The feature on edges.
* \param C The result feature on destination nodes. * @param C The result feature on destination nodes.
* \param has_idx For the edge features, are there indices available. * @param has_idx For the edge features, are there indices available.
* \param N Feature size. * @param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency * @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix. * matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency * @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix. * matrix.
* \param M_block_size block size along the rows of adjacency matrix. * @param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel. * @param kernel The libxsmm kernel.
*/ */
template <typename IdType, typename DType> template <typename IdType, typename DType>
inline void SpMMBlockwiseOpSum( inline void SpMMBlockwiseOpSum(
...@@ -327,21 +327,21 @@ inline void SpMMBlockwiseOpSum( ...@@ -327,21 +327,21 @@ inline void SpMMBlockwiseOpSum(
} }
/*! /*!
* \brief Use libxsmm to perform SpMM-Max/Min on all blocks. * @brief Use libxsmm to perform SpMM-Max/Min on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks. * @param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes. * @param B The feature on source nodes.
* \param E The feature on edges. * @param E The feature on edges.
* \param C The result feature on destination nodes. * @param C The result feature on destination nodes.
* \param argB Arg-Min/Max on source nodes. * @param argB Arg-Min/Max on source nodes.
* \param argE Arg-Min/Max on edges. * @param argE Arg-Min/Max on edges.
* \param has_idx For the edge features, are there indices available. * @param has_idx For the edge features, are there indices available.
* \param N Feature size. * @param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency * @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix. * matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency * @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix. * matrix.
* \param M_block_size block size along the rows of adjacency matrix. * @param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel. * @param kernel The libxsmm kernel.
*/ */
template <typename IdType, typename DType, typename Op, typename Cmp> template <typename IdType, typename DType, typename Op, typename Cmp>
inline void SpMMBlockwiseOpCmp( inline void SpMMBlockwiseOpCmp(
...@@ -391,14 +391,14 @@ inline void SpMMBlockwiseOpCmp( ...@@ -391,14 +391,14 @@ inline void SpMMBlockwiseOpCmp(
} }
/*! /*!
* \brief Free the tiled CSR matrix data. * @brief Free the tiled CSR matrix data.
* \param block_csr_array The array containing csr matrices of all blocks. * @param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency * @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix. * matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency * @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix. * matrix.
* \param use_lhs Whether to use lhs. * @param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs. * @param use_rhs Whether to use rhs.
*/ */
template <typename IdType> template <typename IdType>
inline void SpMMFreeBlocks( inline void SpMMFreeBlocks(
...@@ -413,15 +413,15 @@ inline void SpMMFreeBlocks( ...@@ -413,15 +413,15 @@ inline void SpMMFreeBlocks(
} }
/*! /*!
* \brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format. * @brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes. * @param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges. * @param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling. * @note it uses libxsmm, blocking and dynamic thread scheduling.
*/ */
template <typename IdType, typename DType, typename Op, typename Redop> template <typename IdType, typename DType, typename Op, typename Redop>
void SpMMRedopCsrOpt( void SpMMRedopCsrOpt(
...@@ -551,13 +551,13 @@ void SpMMRedopCsrOpt( ...@@ -551,13 +551,13 @@ void SpMMRedopCsrOpt(
} }
/*! /*!
* \brief Optimized CPU kernel of SpMM-Sum on Csr format. * @brief Optimized CPU kernel of SpMM-Sum on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \note it uses libxsmm, blocking and dynamic thread scheduling. * @note it uses libxsmm, blocking and dynamic thread scheduling.
*/ */
template <typename IdType, typename DType, typename Op> template <typename IdType, typename DType, typename Op>
void SpMMSumCsrLibxsmm( void SpMMSumCsrLibxsmm(
...@@ -569,15 +569,15 @@ void SpMMSumCsrLibxsmm( ...@@ -569,15 +569,15 @@ void SpMMSumCsrLibxsmm(
} }
/*! /*!
* \brief Optimized CPU kernel of SpMM-Min/Max on Csr format. * @brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information. * @param bcast Broadcast information.
* \param csr The Csr matrix. * @param csr The Csr matrix.
* \param ufeat The feature on source nodes. * @param ufeat The feature on source nodes.
* \param efeat The feature on edges. * @param efeat The feature on edges.
* \param out The result feature on destination nodes. * @param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes. * @param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges. * @param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling. * @note it uses libxsmm, blocking and dynamic thread scheduling.
*/ */
template <typename IdType, typename DType, typename Op, typename Cmp> template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrLibxsmm( void SpMMCmpCsrLibxsmm(
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/traversal.cc * @file array/cpu/traversal.cc
* \brief Graph traversal implementation * @brief Graph traversal implementation
*/ */
#include "./traversal.h" #include "./traversal.h"
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/traversal.h * @file array/cpu/traversal.h
* \brief Graph traversal routines. * @brief Graph traversal routines.
* *
* Traversal routines generate frontiers. Frontiers can be node frontiers or * Traversal routines generate frontiers. Frontiers can be node frontiers or
* edge frontiers depending on the traversal function. Each frontier is a list * edge frontiers depending on the traversal function. Each frontier is a list
...@@ -22,7 +22,7 @@ namespace aten { ...@@ -22,7 +22,7 @@ namespace aten {
namespace impl { namespace impl {
/*! /*!
* \brief Traverse the graph in a breadth-first-search (BFS) order. * @brief Traverse the graph in a breadth-first-search (BFS) order.
* *
* The queue object must suffice following interface: * The queue object must suffice following interface:
* Members: * Members:
...@@ -39,12 +39,12 @@ namespace impl { ...@@ -39,12 +39,12 @@ namespace impl {
* The frontier function must be compatible with following interface: * The frontier function must be compatible with following interface:
* void (*make_frontier)(void); * void (*make_frontier)(void);
* *
* \param graph The graph. * @param graph The graph.
* \param sources Source nodes. * @param sources Source nodes.
* \param reversed If true, BFS follows the in-edge direction * @param reversed If true, BFS follows the in-edge direction
* \param queue The queue used to do bfs. * @param queue The queue used to do bfs.
* \param visit The function to call when a node is visited. * @param visit The function to call when a node is visited.
* \param make_frontier The function to indicate that a new frontier can be * @param make_frontier The function to indicate that a new frontier can be
* made; * made;
*/ */
template < template <
...@@ -86,7 +86,7 @@ void BFSTraverseNodes( ...@@ -86,7 +86,7 @@ void BFSTraverseNodes(
} }
/*! /*!
* \brief Traverse the graph in a breadth-first-search (BFS) order, returning * @brief Traverse the graph in a breadth-first-search (BFS) order, returning
* the edges of the BFS tree. * the edges of the BFS tree.
* *
* The queue object must suffice following interface: * The queue object must suffice following interface:
...@@ -104,13 +104,13 @@ void BFSTraverseNodes( ...@@ -104,13 +104,13 @@ void BFSTraverseNodes(
* The frontier function must be compatible with following interface: * The frontier function must be compatible with following interface:
* void (*make_frontier)(void); * void (*make_frontier)(void);
* *
* \param graph The graph. * @param graph The graph.
* \param sources Source nodes. * @param sources Source nodes.
* \param reversed If true, BFS follows the in-edge direction * @param reversed If true, BFS follows the in-edge direction
* \param queue The queue used to do bfs. * @param queue The queue used to do bfs.
* \param visit The function to call when a node is visited. * @param visit The function to call when a node is visited.
* The argument would be edge ID. * The argument would be edge ID.
* \param make_frontier The function to indicate that a new frontier can be * @param make_frontier The function to indicate that a new frontier can be
* made; * made;
*/ */
template < template <
...@@ -154,7 +154,7 @@ void BFSTraverseEdges( ...@@ -154,7 +154,7 @@ void BFSTraverseEdges(
} }
/*! /*!
* \brief Traverse the graph in topological order. * @brief Traverse the graph in topological order.
* *
* The queue object must suffice following interface: * The queue object must suffice following interface:
* Members: * Members:
...@@ -171,11 +171,11 @@ void BFSTraverseEdges( ...@@ -171,11 +171,11 @@ void BFSTraverseEdges(
* The frontier function must be compatible with following interface: * The frontier function must be compatible with following interface:
* void (*make_frontier)(void); * void (*make_frontier)(void);
* *
* \param graph The graph. * @param graph The graph.
* \param reversed If true, follows the in-edge direction * @param reversed If true, follows the in-edge direction
* \param queue The queue used to do bfs. * @param queue The queue used to do bfs.
* \param visit The function to call when a node is visited. * @param visit The function to call when a node is visited.
* \param make_frontier The function to indicate that a new frontier can be * @param make_frontier The function to indicate that a new frontier can be
* made; * made;
*/ */
template < template <
...@@ -233,7 +233,7 @@ enum DFSEdgeTag { ...@@ -233,7 +233,7 @@ enum DFSEdgeTag {
kNonTree, kNonTree,
}; };
/*! /*!
* \brief Traverse the graph in a depth-first-search (DFS) order. * @brief Traverse the graph in a depth-first-search (DFS) order.
* *
* The traversal visit edges in its DFS order. Edges have three tags: * The traversal visit edges in its DFS order. Edges have three tags:
* FORWARD(0), REVERSE(1), NONTREE(2) * FORWARD(0), REVERSE(1), NONTREE(2)
...@@ -243,11 +243,11 @@ enum DFSEdgeTag { ...@@ -243,11 +243,11 @@ enum DFSEdgeTag {
* edge is in the DFS tree. A NONTREE edge is one in which both `u` and `v` have * edge is in the DFS tree. A NONTREE edge is one in which both `u` and `v` have
* been visited but the edge is NOT in the DFS tree. * been visited but the edge is NOT in the DFS tree.
* *
* \param source Source node. * @param source Source node.
* \param reversed If true, DFS follows the in-edge direction * @param reversed If true, DFS follows the in-edge direction
* \param has_reverse_edge If true, REVERSE edges are included * @param has_reverse_edge If true, REVERSE edges are included
* \param has_nontree_edge If true, NONTREE edges are included * @param has_nontree_edge If true, NONTREE edges are included
* \param visit The function to call when an edge is visited; the edge id and * @param visit The function to call when an edge is visited; the edge id and
* its tag will be given as the arguments. * its tag will be given as the arguments.
*/ */
template <typename IdType, typename VisitFn> template <typename IdType, typename VisitFn>
......
/*! /*!
* Copyright (c) 2020 by Contributors * Copyright (c) 2020 by Contributors
* \file array/cpu/array_cumsum.cu * @file array/cpu/array_cumsum.cu
* \brief Array cumsum GPU implementation * @brief Array cumsum GPU implementation
*/ */
#include <dgl/array.h> #include <dgl/array.h>
......
/*! /*!
* Copyright (c) 2019 by Contributors * Copyright (c) 2019 by Contributors
* \file array/cpu/array_index_select.cu * @file array/cpu/array_index_select.cu
* \brief Array index select GPU implementation * @brief Array index select GPU implementation
*/ */
#include <dgl/array.h> #include <dgl/array.h>
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment