Unverified Commit 619d735d authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*!
* Copyright (c) 2020 by Contributors
* \file kernel/cpu/gather_mm.cc
* \brief GatherMM C APIs and definitions.
* @file kernel/cpu/gather_mm.cc
* @brief GatherMM C APIs and definitions.
*/
#include "./gather_mm.h"
#include <dgl/array.h>
......@@ -9,7 +9,7 @@
namespace dgl {
namespace aten {
/*! \brief Generalized SegmentMM. */
/*! @brief Generalized SegmentMM. */
template <int XPU, typename IdType, typename DType>
void SegmentMM(const NDArray A,
const NDArray B,
......@@ -27,7 +27,7 @@ void SegmentMMBackwardB(const NDArray A,
LOG(FATAL) << "Unsupported CPU kernel for SegmentMMBackwardB.";
}
/*! \brief Generalized GatherMM. */
/*! @brief Generalized GatherMM. */
template <int XPU, typename IdType, typename DType>
void GatherMM(const NDArray A,
const NDArray B,
......@@ -37,7 +37,7 @@ void GatherMM(const NDArray A,
LOG(FATAL) << "Unsupported CPU kernel for GatherMM.";
}
/*! \brief Generalized GatherMM_scatter. */
/*! @brief Generalized GatherMM_scatter. */
template <int XPU, typename IdType, typename DType>
void GatherMMScatter(const NDArray A,
const NDArray B,
......
/*!
* Copyright (c) 2022 by Contributors
* \file array/cpu/gather_mm.h
* \brief GATHER_MM CPU kernel function header.
* @file array/cpu/gather_mm.h
* @brief GATHER_MM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_GATHER_MM_H_
#define DGL_ARRAY_CPU_GATHER_MM_H_
......@@ -46,15 +46,15 @@ void matmul(
}
/*!
* \brief CPU kernel of Gather_mm. The input matrix A is expected to be
* @brief CPU kernel of Gather_mm. The input matrix A is expected to be
* sorted according to relation type.
* \param A The input dense matrix of dimension m x k
* \param B The input dense matrix of dimension k x n
* \param C The output dense matrix of dimension m x n
* \param A_dim1_per_rel The number of rows in each relation in A
* \param B_dim1_per_rel The number of rows in each relation in B
* \param a_trans Matrix A to be transposed
* \param b_trans Matrix B to be transposed
* @param A The input dense matrix of dimension m x k
* @param B The input dense matrix of dimension k x n
* @param C The output dense matrix of dimension m x n
* @param A_dim1_per_rel The number of rows in each relation in A
* @param B_dim1_per_rel The number of rows in each relation in B
* @param a_trans Matrix A to be transposed
* @param b_trans Matrix B to be transposed
*/
template <int XPU, typename IdType, typename DType>
void gatherMM_SortedEtype(
......
/*!
* Copyright (c) 2021 by Contributors
* \file array/cpu/negative_sampling.cc
* \brief Uniform negative sampling on CSR.
* @file array/cpu/negative_sampling.cc
* @brief Uniform negative sampling on CSR.
*/
#include <dgl/array.h>
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_pick.h
* \brief Template implementation for rowwise pick operators.
* @file array/cpu/rowwise_pick.h
* @brief Template implementation for rowwise pick operators.
*/
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_sampling.cc
* \brief rowwise sampling
* @file array/cpu/rowwise_sampling.cc
* @brief rowwise sampling
*/
#include <dgl/random.h>
#include <numeric>
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_topk.cc
* \brief rowwise topk
* @file array/cpu/rowwise_topk.cc
* @brief rowwise topk
*/
#include <numeric>
#include <algorithm>
......
/*!
* Copyright (c) 2020 by Contributors
* \file aten/cpu/sddmm.cc
* \brief SDDMM C APIs and definitions.
* @file aten/cpu/sddmm.cc
* @brief SDDMM C APIs and definitions.
*/
#include "./sddmm.h"
#include <dgl/array.h>
......@@ -42,7 +42,7 @@ namespace aten {
} while (0)
/*! \brief Generalized SDDMM on Csr format. */
/*! @brief Generalized SDDMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SDDMMCsr(const std::string& op,
const BcastOff& bcast,
......@@ -59,7 +59,7 @@ void SDDMMCsr(const std::string& op,
});
}
/*! \brief Generalized SDDMM on Csr format with Heterograph support. */
/*! @brief Generalized SDDMM on Csr format with Heterograph support. */
template <int XPU, typename IdType, typename DType>
void SDDMMCsrHetero(const std::string& op,
const BcastOff& bcast,
......@@ -131,7 +131,7 @@ template void SDDMMCsrHetero<kDGLCPU, int64_t, double>(
const std::vector<dgl_type_t>& in_eid,
const std::vector<dgl_type_t>& out_eid);
/*! \brief Generalized SDDMM on Coo format. */
/*! @brief Generalized SDDMM on Coo format. */
template <int XPU, typename IdType, typename DType>
void SDDMMCoo(const std::string& op,
const BcastOff& bcast,
......@@ -148,7 +148,7 @@ void SDDMMCoo(const std::string& op,
});
}
/*! \brief Generalized SDDMM on Coo format with Heterograph support. */
/*! @brief Generalized SDDMM on Coo format with Heterograph support. */
template <int XPU, typename IdType, typename DType>
void SDDMMCooHetero(const std::string& op,
const BcastOff& bcast,
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/sddmm.h
* \brief SDDMM CPU kernel function header.
* @file array/cpu/sddmm.h
* @brief SDDMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SDDMM_H_
#define DGL_ARRAY_CPU_SDDMM_H_
......@@ -17,13 +17,13 @@ namespace aten {
namespace cpu {
/*!
* \brief CPU kernel of g-SDDMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param lhs The left hand side operand feature.
* \param rhs The right hand side operand feature.
* \param out The result feature on edges.
* \note it uses node parallel strategy, different threads are responsible
* @brief CPU kernel of g-SDDMM on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param lhs The left hand side operand feature.
* @param rhs The right hand side operand feature.
* @param out The result feature on edges.
* @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
template <
......@@ -69,13 +69,13 @@ void SDDMMCsr(
}
/*!
* \brief CPU kernel of g-SDDMM on Coo format.
* \param bcast Broadcast information.
* \param coo The COO matrix.
* \param lhs The left hand side operand feature.
* \param rhs The right hand side operand feature.
* \param out The result feature on edges.
* \note it uses edge parallel strategy, different threads are responsible
* @brief CPU kernel of g-SDDMM on Coo format.
* @param bcast Broadcast information.
* @param coo The COO matrix.
* @param lhs The left hand side operand feature.
* @param rhs The right hand side operand feature.
* @param out The result feature on edges.
* @note it uses edge parallel strategy, different threads are responsible
* for the computation of different edges.
*/
template <
......
/*!
* Copyright (c) 2020 by Contributors
* \file kernel/cpu/segment_reduce.cc
* \brief Segment reduce C APIs and definitions.
* @file kernel/cpu/segment_reduce.cc
* @brief Segment reduce C APIs and definitions.
*/
#include "./segment_reduce.h"
#include <dgl/array.h>
......@@ -11,7 +11,7 @@
namespace dgl {
namespace aten {
/*! \brief Segment Reduce operator. */
/*! @brief Segment Reduce operator. */
template <int XPU, typename IdType, typename DType>
void SegmentReduce(
const std::string& op,
......@@ -34,7 +34,7 @@ void SegmentReduce(
}
}
/*! \brief Scatter Add.*/
/*! @brief Scatter Add.*/
template <int XPU, typename IdType, typename DType>
void ScatterAdd(NDArray feat,
NDArray idx,
......@@ -42,7 +42,7 @@ void ScatterAdd(NDArray feat,
cpu::ScatterAdd<IdType, DType>(feat, idx, out);
}
/*! \brief Update gradients for reduce operator max/min on heterogeneous graph.*/
/*! @brief Update gradients for reduce operator max/min on heterogeneous graph.*/
template <int XPU, typename IdType, typename DType>
void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
const std::string& op,
......@@ -53,7 +53,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
cpu::UpdateGradMinMax_hetero<IdType, DType>(g, op, feat, idx, idx_etype, out);
}
/*! \brief Backward function of segment cmp.*/
/*! @brief Backward function of segment cmp.*/
template <int XPU, typename IdType, typename DType>
void BackwardSegmentCmp(
NDArray feat,
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/segment_reduce.h
* \brief Segment reduce kernel function header.
* @file array/cpu/segment_reduce.h
* @brief Segment reduce kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
#define DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
......@@ -18,10 +18,10 @@ namespace aten {
namespace cpu {
/*!
* \brief CPU kernel of segment sum.
* \param feat The input tensor.
* \param offsets The offset tensor storing the ranges of segments.
* \param out The output tensor.
* @brief CPU kernel of segment sum.
* @param feat The input tensor.
* @param offsets The offset tensor storing the ranges of segments.
* @param out The output tensor.
*/
template <typename IdType, typename DType>
void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
......@@ -43,11 +43,11 @@ void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
}
/*!
* \brief CPU kernel of segment min/max.
* \param feat The input tensor.
* \param offsets The offset tensor storing the ranges of segments.
* \param out The output tensor.
* \param arg An auxiliary tensor storing the argmin/max information
* @brief CPU kernel of segment min/max.
* @param feat The input tensor.
* @param offsets The offset tensor storing the ranges of segments.
* @param out The output tensor.
* @param arg An auxiliary tensor storing the argmin/max information
* used in backward phase.
*/
template <typename IdType, typename DType, typename Cmp>
......@@ -77,11 +77,11 @@ void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) {
}
/*!
* \brief CPU kernel of Scatter Add (on first dimension) operator.
* \note math equation: out[idx[i], *] += feat[i, *]
* \param feat The input tensor.
* \param idx The indices tensor.
* \param out The output tensor.
* @brief CPU kernel of Scatter Add (on first dimension) operator.
* @note math equation: out[idx[i], *] += feat[i, *]
* @param feat The input tensor.
* @param idx The indices tensor.
* @param out The output tensor.
*/
template <typename IdType, typename DType>
void ScatterAdd(NDArray feat, NDArray idx, NDArray out) {
......@@ -102,13 +102,13 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) {
}
/*!
* \brief CPU kernel to update gradients for reduce op max/min
* \param graph The input heterogeneous graph.
* \param op The binary operator, could be `copy_u`, `copy_e`.
* \param list_feat List of the input tensors.
* \param list_idx List of the indices tensors.
* \param list_idx_etype List of the node- or edge-type tensors.
* \param list_out List of the output tensors.
* @brief CPU kernel to update gradients for reduce op max/min
* @param graph The input heterogeneous graph.
* @param op The binary operator, could be `copy_u`, `copy_e`.
* @param list_feat List of the input tensors.
* @param list_idx List of the indices tensors.
* @param list_idx_etype List of the node- or edge-type tensors.
* @param list_out List of the output tensors.
*/
template <typename IdType, typename DType>
void UpdateGradMinMax_hetero(
......@@ -160,11 +160,11 @@ void UpdateGradMinMax_hetero(
}
/*!
* \brief CPU kernel of backward phase of segment min/max.
* \note math equation: out[arg[i, k], k] = feat[i, k]
* \param feat The input tensor.
* \param arg The argmin/argmax tensor.
* \param out The output tensor.
* @brief CPU kernel of backward phase of segment min/max.
* @note math equation: out[arg[i, k], k] = feat[i, k]
* @param feat The input tensor.
* @param arg The argmin/argmax tensor.
* @param out The output tensor.
*/
template <typename IdType, typename DType>
void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) {
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/cpu/spmat_op_impl.cc
* \brief CPU implementation of COO sparse matrix operators
* @file array/cpu/spmat_op_impl.cc
* @brief CPU implementation of COO sparse matrix operators
*/
#include <dgl/runtime/parallel_for.h>
#include <dmlc/omp.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/cpu/spmat_op_impl_csr.cc
* \brief CSR matrix operator CPU implementation
* @file array/cpu/spmat_op_impl_csr.cc
* @brief CSR matrix operator CPU implementation
*/
#include <dgl/array.h>
#include <dgl/runtime/parallel_for.h>
......
/*!
* Copyright (c) 2020 by Contributors
* \file kernel/cpu/spmm.cc
* \brief SPMM C APIs and definitions.
* @file kernel/cpu/spmm.cc
* @brief SPMM C APIs and definitions.
*/
#include "./spmm.h"
#include <dgl/array.h>
......@@ -9,7 +9,7 @@
namespace dgl {
namespace aten {
/*! \brief Generalized SpMM on Csr format. */
/*! @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SpMMCsr(const std::string& op, const std::string& reduce,
const BcastOff& bcast,
......@@ -41,7 +41,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce,
}
}
/*! \brief Generalized SpMM on Csr format. */
/*! @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SpMMCsrHetero(const std::string& op, const std::string& reduce,
const BcastOff& bcast,
......@@ -159,7 +159,7 @@ template void SpMMCsrHetero<kDGLCPU, int64_t, double>(
const std::vector<dgl_type_t>& ufeat_node_tids,
const std::vector<dgl_type_t>& out_node_tids);
/*! \brief Edge_softmax_csr forward op on Csr format. */
/*! @brief Edge_softmax_csr forward op on Csr format. */
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_forward(const std::string& op,
const BcastOff& bcast,
......@@ -172,7 +172,7 @@ void Edge_softmax_csr_forward(const std::string& op,
});
}
/*! \brief Edge_softmax_csr backward op on Csr format. */
/*! @brief Edge_softmax_csr backward op on Csr format. */
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_backward(const std::string& op,
const BcastOff& bcast,
......@@ -219,7 +219,7 @@ template void Edge_softmax_csr_backward<kDGLCPU, int64_t, double>(
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out);
/*! \brief Generalized SpMM on Coo format. */
/*! @brief Generalized SpMM on Coo format. */
template <int XPU, typename IdType, typename DType>
void SpMMCoo(const std::string& op, const std::string& reduce,
const BcastOff& bcast,
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
* @file array/cpu/spmm.h
* @brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
......@@ -33,14 +33,14 @@ namespace cpu {
#if !defined(_WIN32)
#ifdef USE_AVX
/*!
* \brief CPU kernel of SpMM on Csr format using Xbyak.
* \param cpu_spec JIT'ed kernel
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param X The feature on source nodes.
* \param W The feature on edges.
* \param O The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* @brief CPU kernel of SpMM on Csr format using Xbyak.
* @param cpu_spec JIT'ed kernel
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param X The feature on source nodes.
* @param W The feature on edges.
* @param O The result feature on destination nodes.
* @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. For each edge, it uses the
* JIT'ed kernel.
*/
......@@ -70,14 +70,14 @@ void SpMMSumCsrXbyak(
#endif // _WIN32
/*!
* \brief Naive CPU kernel of SpMM on Csr format.
* \param cpu_spec JIT'ed kernel
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param X The feature on source nodes.
* \param W The feature on edges.
* \param O The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* @brief Naive CPU kernel of SpMM on Csr format.
* @param cpu_spec JIT'ed kernel
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param X The feature on source nodes.
* @param W The feature on edges.
* @param O The result feature on destination nodes.
* @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op>
......@@ -111,13 +111,13 @@ void SpMMSumCsrNaive(
}
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* @brief CPU kernel of SpMM on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op>
......@@ -177,13 +177,13 @@ void SpMMSumCsr(
}
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* @brief CPU kernel of SpMM on Coo format.
* @param bcast Broadcast information.
* @param coo The Coo matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. To avoid possible data hazard,
* we use atomic operators in the reduction phase.
*/
......@@ -226,23 +226,23 @@ void SpMMSumCoo(
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* @brief CPU kernel of SpMM-Min/Max on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \param arge Arg-Min/Max on edges, which refers to the source node indices
* @param arge Arg-Min/Max on edges, which refers to the source node indices
correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \note It uses node parallel strategy, different threads are responsible for
* @note It uses node parallel strategy, different threads are responsible for
* the computation of different nodes.
* \note The result will contain infinity for zero-degree nodes.
* @note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
......@@ -323,30 +323,30 @@ void SpMMCmpCsr(
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* @brief CPU kernel of SpMM-Min/Max on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices
* @param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \param argu_ntype Node type of the arg-Min/Max on source nodes, which refers
* @param argu_ntype Node type of the arg-Min/Max on source nodes, which refers
* the source node types correspond to the minimum/maximum values of
* reduction result on destination nodes. It's useful in computing
* gradients of Min/Max reducer.
* \param arge_etype Edge-type of the arg-Min/Max on edges. which refers the
* @param arge_etype Edge-type of the arg-Min/Max on edges. which refers the
* source node indices correspond to the minimum/maximum values of
* reduction result on destination nodes. It's useful in computing
* gradients of Min/Max reducer.
* \param src_type Node type of the source nodes of an etype
* \param etype Edge type
* @param src_type Node type of the source nodes of an etype
* @param etype Edge type
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrHetero(
......@@ -419,24 +419,24 @@ void SpMMCmpCsrHetero(
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* @brief CPU kernel of SpMM-Min/Max on Coo format.
* @param bcast Broadcast information.
* @param coo The Coo matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices
* @param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max
* reducer.
* \note it uses node parallel strategy, different threads are responsible for
* @note it uses node parallel strategy, different threads are responsible for
* the computation of different nodes. To avoid possible data hazard, we
* use atomic operators in the reduction phase.
* \note The result will contain infinity for zero-degree nodes.
* @note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
......@@ -485,12 +485,12 @@ void SpMMCmpCoo(
}
/*!
* \brief CPU kernel of Edge_softmax_csr_forward on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result of edge_softmax_forward.
* @brief CPU kernel of Edge_softmax_csr_forward on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result of edge_softmax_forward.
*/
template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_forward(
......@@ -533,12 +533,12 @@ void Edge_softmax_csr_forward(
}
/*!
* \brief CPU kernel of Edge_softmax_csr_backward on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param out The result of forward.
* \param sds The result of gradient * out.
* \param back_out The result of edge_softmax_backward.
* @brief CPU kernel of Edge_softmax_csr_backward on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param out The result of forward.
* @param sds The result of gradient * out.
* @param back_out The result of edge_softmax_backward.
*/
template <typename IdType, typename DType, typename Op>
void Edge_softmax_csr_backward(
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm_binary_ops.h
* \brief SPMM CPU Binary ops.
* @file array/cpu/spmm_binary_ops.h
* @brief SPMM CPU Binary ops.
*/
#ifndef DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_
#define DGL_ARRAY_CPU_SPMM_BINARY_OPS_H_
......
/*!
* Copyright (c) 2021 Intel Corporation
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
* \author Sanchit Misra <sanchit.misra@intel.com>,
* @file array/cpu/spmm.h
* @brief SPMM CPU kernel function header.
* @author Sanchit Misra <sanchit.misra@intel.com>,
* Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>,
* Vasimuddin Md <vasimuddin.md@intel.com>,
* Sasikanth Avancha <sasikanth.avancha@intel.com>
......@@ -49,19 +49,19 @@ int32_t GetLLCSize() {
}
/*!
* \brief Tile the CSR matrix to roughly make sure that the column tiles and
* @brief Tile the CSR matrix to roughly make sure that the column tiles and
* corresponding neighbor features fit into LLC and the row tiles
* are assigned to OMP threads.
* \param csr The Csr matrix.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency
* @param csr The Csr matrix.
* @param block_csr_array The array containing csr matrices of all blocks.
* @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency
* @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param K_block_size block size along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
* @param M_block_size block size along the rows of adjacency matrix.
* @param K_block_size block size along the columns of adjacency matrix.
* @param use_lhs Whether to use lhs.
* @param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMCreateBlocks(
......@@ -166,12 +166,12 @@ inline void SpMMCreateBlocks(
}
/*!
* \brief Create libxsmm kernel.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param redop_flag Flag specifying the reduction operation.
* \param is_cmp Is the reduction operation a compare operation.
* \note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel.
* @brief Create libxsmm kernel.
* @param has_idx For the edge features, are there indices available.
* @param N Feature size.
* @param redop_flag Flag specifying the reduction operation.
* @param is_cmp Is the reduction operation a compare operation.
* @note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel.
* Given a node u, the kernel performs an elementwise "Op" on the
* features of the neighbors and/or the edges incident on u.
* Subsequently, it performs an elementwise "Redop" on all such
......@@ -270,19 +270,19 @@ inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel(
}
/*!
* \brief Use libxsmm to perform SpMM-Sum on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency
* @brief Use libxsmm to perform SpMM-Sum on all blocks.
* @param block_csr_array The array containing csr matrices of all blocks.
* @param B The feature on source nodes.
* @param E The feature on edges.
* @param C The result feature on destination nodes.
* @param has_idx For the edge features, are there indices available.
* @param N Feature size.
* @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency
* @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
* @param M_block_size block size along the rows of adjacency matrix.
* @param kernel The libxsmm kernel.
*/
template <typename IdType, typename DType>
inline void SpMMBlockwiseOpSum(
......@@ -327,21 +327,21 @@ inline void SpMMBlockwiseOpSum(
}
/*!
* \brief Use libxsmm to perform SpMM-Max/Min on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param argB Arg-Min/Max on source nodes.
* \param argE Arg-Min/Max on edges.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency
* @brief Use libxsmm to perform SpMM-Max/Min on all blocks.
* @param block_csr_array The array containing csr matrices of all blocks.
* @param B The feature on source nodes.
* @param E The feature on edges.
* @param C The result feature on destination nodes.
* @param argB Arg-Min/Max on source nodes.
* @param argE Arg-Min/Max on edges.
* @param has_idx For the edge features, are there indices available.
* @param N Feature size.
* @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency
* @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
* @param M_block_size block size along the rows of adjacency matrix.
* @param kernel The libxsmm kernel.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
inline void SpMMBlockwiseOpCmp(
......@@ -391,14 +391,14 @@ inline void SpMMBlockwiseOpCmp(
}
/*!
* \brief Free the tiled CSR matrix data.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency
* @brief Free the tiled CSR matrix data.
* @param block_csr_array The array containing csr matrices of all blocks.
* @param num_M_blocks Number of blocks to create along the rows of adjacency
* matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency
* @param num_K_blocks Number of blocks to create along the columns of adjacency
* matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
* @param use_lhs Whether to use lhs.
* @param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMFreeBlocks(
......@@ -413,15 +413,15 @@ inline void SpMMFreeBlocks(
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
* @brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @param argu Arg-Min/Max on source nodes.
* @param arge Arg-Min/Max on edges.
* @note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op, typename Redop>
void SpMMRedopCsrOpt(
......@@ -551,13 +551,13 @@ void SpMMRedopCsrOpt(
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
* @brief Optimized CPU kernel of SpMM-Sum on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrLibxsmm(
......@@ -569,15 +569,15 @@ void SpMMSumCsrLibxsmm(
}
/*!
* \brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
* @brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
* @param bcast Broadcast information.
* @param csr The Csr matrix.
* @param ufeat The feature on source nodes.
* @param efeat The feature on edges.
* @param out The result feature on destination nodes.
* @param argu Arg-Min/Max on source nodes.
* @param arge Arg-Min/Max on edges.
* @note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrLibxsmm(
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/traversal.cc
* \brief Graph traversal implementation
* @file array/cpu/traversal.cc
* @brief Graph traversal implementation
*/
#include "./traversal.h"
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/traversal.h
* \brief Graph traversal routines.
* @file array/cpu/traversal.h
* @brief Graph traversal routines.
*
* Traversal routines generate frontiers. Frontiers can be node frontiers or
* edge frontiers depending on the traversal function. Each frontier is a list
......@@ -22,7 +22,7 @@ namespace aten {
namespace impl {
/*!
* \brief Traverse the graph in a breadth-first-search (BFS) order.
* @brief Traverse the graph in a breadth-first-search (BFS) order.
*
 * The queue object must satisfy the following interface:
* Members:
......@@ -39,12 +39,12 @@ namespace impl {
 * The frontier function must be compatible with the following interface:
* void (*make_frontier)(void);
*
* \param graph The graph.
* \param sources Source nodes.
* \param reversed If true, BFS follows the in-edge direction
* \param queue The queue used to do bfs.
* \param visit The function to call when a node is visited.
* \param make_frontier The function to indicate that a new froniter can be
* @param graph The graph.
* @param sources Source nodes.
* @param reversed If true, BFS follows the in-edge direction
* @param queue The queue used to do bfs.
* @param visit The function to call when a node is visited.
 * @param make_frontier The function to indicate that a new frontier can be
* made;
*/
template <
......@@ -86,7 +86,7 @@ void BFSTraverseNodes(
}
/*!
* \brief Traverse the graph in a breadth-first-search (BFS) order, returning
* @brief Traverse the graph in a breadth-first-search (BFS) order, returning
* the edges of the BFS tree.
*
 * The queue object must satisfy the following interface:
......@@ -104,13 +104,13 @@ void BFSTraverseNodes(
 * The frontier function must be compatible with the following interface:
* void (*make_frontier)(void);
*
* \param graph The graph.
* \param sources Source nodes.
* \param reversed If true, BFS follows the in-edge direction
* \param queue The queue used to do bfs.
* \param visit The function to call when a node is visited.
* @param graph The graph.
* @param sources Source nodes.
* @param reversed If true, BFS follows the in-edge direction
* @param queue The queue used to do bfs.
* @param visit The function to call when a node is visited.
* The argument would be edge ID.
* \param make_frontier The function to indicate that a new frontier can be
* @param make_frontier The function to indicate that a new frontier can be
* made;
*/
template <
......@@ -154,7 +154,7 @@ void BFSTraverseEdges(
}
/*!
* \brief Traverse the graph in topological order.
* @brief Traverse the graph in topological order.
*
 * The queue object must satisfy the following interface:
* Members:
......@@ -171,11 +171,11 @@ void BFSTraverseEdges(
 * The frontier function must be compatible with the following interface:
* void (*make_frontier)(void);
*
* \param graph The graph.
* \param reversed If true, follows the in-edge direction
* \param queue The queue used to do bfs.
* \param visit The function to call when a node is visited.
* \param make_frontier The function to indicate that a new froniter can be
* @param graph The graph.
* @param reversed If true, follows the in-edge direction
* @param queue The queue used to do bfs.
* @param visit The function to call when a node is visited.
 * @param make_frontier The function to indicate that a new frontier can be
* made;
*/
template <
......@@ -233,7 +233,7 @@ enum DFSEdgeTag {
kNonTree,
};
/*!
* \brief Traverse the graph in a depth-first-search (DFS) order.
* @brief Traverse the graph in a depth-first-search (DFS) order.
*
* The traversal visit edges in its DFS order. Edges have three tags:
* FORWARD(0), REVERSE(1), NONTREE(2)
......@@ -243,11 +243,11 @@ enum DFSEdgeTag {
* edge is in the DFS tree. A NONTREE edge is one in which both `u` and `v` have
* been visisted but the edge is NOT in the DFS tree.
*
* \param source Source node.
* \param reversed If true, DFS follows the in-edge direction
* \param has_reverse_edge If true, REVERSE edges are included
* \param has_nontree_edge If true, NONTREE edges are included
* \param visit The function to call when an edge is visited; the edge id and
* @param source Source node.
* @param reversed If true, DFS follows the in-edge direction
* @param has_reverse_edge If true, REVERSE edges are included
* @param has_nontree_edge If true, NONTREE edges are included
* @param visit The function to call when an edge is visited; the edge id and
* its tag will be given as the arguments.
*/
template <typename IdType, typename VisitFn>
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/array_cumsum.cu
* \brief Array cumsum GPU implementation
* @file array/cpu/array_cumsum.cu
* @brief Array cumsum GPU implementation
*/
#include <dgl/array.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/cpu/array_index_select.cu
* \brief Array index select GPU implementation
* @file array/cpu/array_index_select.cu
* @brief Array index select GPU implementation
*/
#include <dgl/array.h>
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment