Unverified Commit 619d735d authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*!
* Copyright (c) 2021 by Contributors
* \file array/cpu/array_index_select_uvm.cuh
* \brief Array index select GPU kernel implementation
* @file array/cpu/array_index_select_uvm.cuh
* @brief Array index select GPU kernel implementation
*/
#ifndef DGL_ARRAY_CUDA_UVM_ARRAY_INDEX_SELECT_UVM_CUH_
......
/*!
* Copyright (c) 2021 by Contributors
* \file array/filter.cc
* \brief Object for selecting items in a set, or selecting items not in a set.
* @file array/filter.cc
* @brief Object for selecting items in a set, or selecting items not in a set.
*/
#include "./filter.h"
......
/*!
* Copyright (c) 2021 by Contributors
* \file array/filter.h
* \brief Object for selecting items in a set, or selecting items not in a set.
* @file array/filter.h
* @brief Object for selecting items in a set, or selecting items not in a set.
*/
#ifndef DGL_ARRAY_FILTER_H_
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/kernel.cc
* \brief New kernels
* @file array/kernel.cc
* @brief New kernels
*/
#include <dgl/packed_func_ext.h>
#include <dgl/base_heterograph.h>
......@@ -23,7 +23,7 @@ namespace {
} // namespace
/*! \brief Generalized Sparse Matrix-Matrix Multiplication. */
/*! @brief Generalized Sparse Matrix-Matrix Multiplication. */
void SpMM(const std::string& op, const std::string& reduce,
HeteroGraphPtr graph,
NDArray ufeat,
......@@ -54,7 +54,7 @@ void SpMM(const std::string& op, const std::string& reduce,
}
/*! \brief Generalized segmented dense Matrix-Matrix Multiplication. */
/*! @brief Generalized segmented dense Matrix-Matrix Multiplication. */
void SegmentMM(const NDArray A,
const NDArray B,
NDArray C,
......@@ -102,7 +102,7 @@ void SegmentMMBackwardB(const NDArray A,
}
/*! \brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */
/*! @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */
void GatherMM(const NDArray A,
const NDArray B,
NDArray C,
......@@ -139,7 +139,7 @@ void GatherMM(const NDArray A,
}
/*! \brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */
/*! @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */
void GatherMMScatter(const NDArray A,
const NDArray B,
NDArray C,
......@@ -179,7 +179,7 @@ void GatherMMScatter(const NDArray A,
}
/*! \brief Generalized Sparse Matrix-Matrix Multiplication with hetero-graph support. */
/*! @brief Generalized Sparse Matrix-Matrix Multiplication with hetero-graph support. */
void SpMMHetero(const std::string& op, const std::string& reduce,
HeteroGraphPtr graph,
const std::vector<NDArray>& ufeat_vec,
......@@ -227,7 +227,7 @@ void SpMMHetero(const std::string& op, const std::string& reduce,
}
/*! \brief Generalized Sampled Dense-Dense Matrix Multiplication. */
/*! @brief Generalized Sampled Dense-Dense Matrix Multiplication. */
void SDDMM(const std::string& op,
HeteroGraphPtr graph,
NDArray lhs,
......@@ -259,11 +259,11 @@ void SDDMM(const std::string& op,
}
/*!
* \brief Find the src/dst/etype id based on the target 'u', 'v' or 'e'.
* @brief Find the src/dst/etype id based on the target 'u', 'v' or 'e'.
*
* \param graph The input graph.
* \param target 'u', 'v' or 'e'. The target of the lhs or rhs data of an etype.
* \param etype Relation type of the input graph.
* @param graph The input graph.
* @param target 'u', 'v' or 'e'. The target of the lhs or rhs data of an etype.
* @param etype Relation type of the input graph.
*/
int get_typeid_by_target(HeteroGraphPtr graph, int target, dgl_type_t etype) {
auto pair = graph->meta_graph()->FindEdge(etype);
......@@ -274,7 +274,7 @@ int get_typeid_by_target(HeteroGraphPtr graph, int target, dgl_type_t etype) {
return etype;
}
/*! \brief Generalized Sampled Dense-Dense Matrix Multiplication. */
/*! @brief Generalized Sampled Dense-Dense Matrix Multiplication. */
void SDDMMHetero(const std::string& op,
HeteroGraphPtr graph,
std::vector<NDArray> lhs,
......@@ -322,7 +322,7 @@ void SDDMMHetero(const std::string& op,
}
/*! \brief Generalized Edge_softmax op for forward */
/*! @brief Generalized Edge_softmax op for forward */
void Edge_softmax_forward(const std::string& op,
HeteroGraphPtr graph,
NDArray ufeat,
......@@ -342,7 +342,7 @@ void Edge_softmax_forward(const std::string& op,
}
/*! \brief Generalized Edge_softmax op for backward */
/*! @brief Generalized Edge_softmax op for backward */
void Edge_softmax_backward(const std::string& op,
HeteroGraphPtr graph,
NDArray out,
......@@ -372,7 +372,7 @@ NDArray GetEdgeMapping(HeteroGraphRef graph) {
}
}
/*! \brief Segment reduce dispatch function. */
/*! @brief Segment reduce dispatch function. */
void SegmentReduceDispatch(const std::string& op,
NDArray feat,
NDArray offsets,
......@@ -387,7 +387,7 @@ void SegmentReduceDispatch(const std::string& op,
});
}
/*! \brief Scatter Add (on first dimension) dispatch function. */
/*! @brief Scatter Add (on first dimension) dispatch function. */
void ScatterAddDispatch(NDArray feat, NDArray idx, NDArray out) {
ATEN_XPU_SWITCH_CUDA(feat->ctx.device_type, XPU, "ScatterAdd", {
ATEN_ID_TYPE_SWITCH(idx->dtype, IdType, {
......@@ -398,7 +398,7 @@ void ScatterAddDispatch(NDArray feat, NDArray idx, NDArray out) {
});
}
/*! \brief Update gradients (reduce op max/min) dispatch function on heterogeneous graph. */
/*! @brief Update gradients (reduce op max/min) dispatch function on heterogeneous graph. */
void UpdateGradMinMaxDispatchHetero(const HeteroGraphPtr& graph,
const std::string& op,
const std::vector<NDArray>& feat,
......@@ -416,7 +416,7 @@ void UpdateGradMinMaxDispatchHetero(const HeteroGraphPtr& graph,
});
}
/*! \brief Backward segment cmp dispatch function.*/
/*! @brief Backward segment cmp dispatch function.*/
void BackwardSegmentCmpDispatch(NDArray feat, NDArray arg, NDArray out) {
ATEN_XPU_SWITCH_CUDA(feat->ctx.device_type, XPU, "BackwardSegmentCmp", {
ATEN_ID_TYPE_SWITCH(arg->dtype, IdType, {
......@@ -724,14 +724,14 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLKernelGetEdgeMapping")
});
/*!
* \brief Sparse matrix multiplication with graph interface.
* @brief Sparse matrix multiplication with graph interface.
*
* \param A_ref The left operand.
* \param A_weights The edge weights of graph A.
* \param B_ref The right operand.
* \param B_weights The edge weights of graph B.
* \param num_vtypes The number of vertex types of the graph to be returned.
* \return A pair consisting of the new graph as well as its edge weights.
* @param A_ref The left operand.
* @param A_weights The edge weights of graph A.
* @param B_ref The right operand.
* @param B_weights The edge weights of graph B.
* @param num_vtypes The number of vertex types of the graph to be returned.
* @return A pair consisting of the new graph as well as its edge weights.
*/
DGL_REGISTER_GLOBAL("sparse._CAPI_DGLCSRMM")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/kernel_decl.h
* \brief Sparse matrix format-specific operator declarations.
* @file array/kernel_decl.h
* @brief Sparse matrix format-specific operator declarations.
*/
#ifndef DGL_ARRAY_KERNEL_DECL_H_
#define DGL_ARRAY_KERNEL_DECL_H_
......@@ -18,7 +18,7 @@ namespace dgl {
namespace aten {
/*!
* \brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format.
* @brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format.
*/
template <int XPU, typename IdType, typename DType>
void SpMMCsr(const std::string& op, const std::string& reduce,
......@@ -30,7 +30,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce,
std::vector<NDArray> out_aux);
/*!
* \brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format
* @brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format
with heterograph support.
*/
template <int XPU, typename IdType, typename DType>
......@@ -44,7 +44,7 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
const std::vector<dgl_type_t>& ufeat_eid,
const std::vector<dgl_type_t>& out_eid);
/*!
* \brief Generalized Sparse Matrix Dense Matrix Multiplication on Coo format.
* @brief Generalized Sparse Matrix Dense Matrix Multiplication on Coo format.
*/
template <int XPU, typename IdType, typename DType>
void SpMMCoo(const std::string& op, const std::string& reduce,
......@@ -56,7 +56,7 @@ void SpMMCoo(const std::string& op, const std::string& reduce,
std::vector<NDArray> out_aux);
/*!
* \brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr format.
* @brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr format.
*/
template <int XPU, typename IdType, typename DType>
void SDDMMCsr(const std::string& op,
......@@ -68,7 +68,7 @@ void SDDMMCsr(const std::string& op,
int lhs_target,
int rhs_target);
/*!
* \brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr
* @brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr
format with heterograph support.
*/
template <int XPU, typename IdType, typename DType>
......@@ -84,7 +84,7 @@ void SDDMMCsrHetero(const std::string& op,
const std::vector<dgl_type_t>& out_eid);
/*!
* \brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo format.
* @brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo format.
*/
template <int XPU, typename IdType, typename DType>
void SDDMMCoo(const std::string& op,
......@@ -97,7 +97,7 @@ void SDDMMCoo(const std::string& op,
int rhs_target);
/*!
* \brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo
* @brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo
format with heterograph support.
*/
template <int XPU, typename IdType, typename DType>
......@@ -113,7 +113,7 @@ void SDDMMCooHetero(const std::string& op,
const std::vector<dgl_type_t>& rhs_eid);
/*!
* \brief Generalized Dense Matrix-Matrix Multiplication according to relation types.
* @brief Generalized Dense Matrix-Matrix Multiplication according to relation types.
*/
template <int XPU, typename IdType, typename DType>
void GatherMM(const NDArray A,
......@@ -123,7 +123,7 @@ void GatherMM(const NDArray A,
const NDArray idx_b);
/*!
* \brief Generalized Dense Matrix-Matrix Multiplication according to relation types.
* @brief Generalized Dense Matrix-Matrix Multiplication according to relation types.
*/
template <int XPU, typename IdType, typename DType>
void GatherMMScatter(const NDArray A,
......@@ -134,7 +134,7 @@ void GatherMMScatter(const NDArray A,
const NDArray idx_c);
/*!
* \brief Generalized segmented dense Matrix-Matrix Multiplication.
* @brief Generalized segmented dense Matrix-Matrix Multiplication.
*/
template <int XPU, typename IdType, typename DType>
void SegmentMM(const NDArray A,
......@@ -150,7 +150,7 @@ void SegmentMMBackwardB(const NDArray A,
const NDArray seglen);
/*!
* \brief Segment reduce.
* @brief Segment reduce.
*/
template <int XPU, typename IdType, typename DType>
void SegmentReduce(const std::string& op,
......@@ -160,7 +160,7 @@ void SegmentReduce(const std::string& op,
NDArray arg);
/*!
* \brief Scatter Add on first dimension.
* @brief Scatter Add on first dimension.
*/
template <int XPU, typename IdType, typename DType>
void ScatterAdd(NDArray feat,
......@@ -168,7 +168,7 @@ void ScatterAdd(NDArray feat,
NDArray out);
/*!
* \brief Update gradients for reduce operator max and min on first dimension.
* @brief Update gradients for reduce operator max and min on first dimension.
*/
template <int XPU, typename IdType, typename DType>
void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
......@@ -179,7 +179,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
std::vector<NDArray>* out);
/*!
* \brief Backward function of segment cmp.
* @brief Backward function of segment cmp.
*/
template <int XPU, typename IdType, typename DType>
void BackwardSegmentCmp(NDArray feat,
......@@ -187,16 +187,16 @@ void BackwardSegmentCmp(NDArray feat,
NDArray out);
/*!
* \brief Sparse-sparse matrix multiplication
* @brief Sparse-sparse matrix multiplication
*
* \param A The left operand.
* \param A_weights The weights of matrix as a 1D tensor.
* \param B The right operand.
* \param B_weights The weights of matrix as a 1D tensor.
* @param A The left operand.
* @param A_weights The weights of matrix as a 1D tensor.
* @param B The right operand.
* @param B_weights The weights of matrix as a 1D tensor.
*
* \note GPU implementation will cast the indices to 32 bit.
* \note The zero entries in the result are not removed.
* \note The CSR matrix should not have duplicate entries.
* @note GPU implementation will cast the indices to 32 bit.
* @note The zero entries in the result are not removed.
* @note The CSR matrix should not have duplicate entries.
*/
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRMM(
......@@ -206,14 +206,14 @@ std::pair<CSRMatrix, NDArray> CSRMM(
NDArray B_weights);
/*!
* \brief Sparse-sparse matrix summation.
* @brief Sparse-sparse matrix summation.
*
* \param A The sparse matrices with the same size.
* \param A_weights The weights of each sparse matrix as a 1D tensor.
* @param A The sparse matrices with the same size.
* @param A_weights The weights of each sparse matrix as a 1D tensor.
*
* \note GPU implementation will cast the indices to 32 bit.
* \note The zero entries in the result are not removed.
* \note The CSR matrix should not have duplicate entries.
* @note GPU implementation will cast the indices to 32 bit.
* @note The zero entries in the result are not removed.
* @note The CSR matrix should not have duplicate entries.
*/
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRSum(
......@@ -221,7 +221,7 @@ std::pair<CSRMatrix, NDArray> CSRSum(
const std::vector<NDArray>& A_weights);
/*!
* \brief Edge_softmax_csr forward function on Csr format.
* @brief Edge_softmax_csr forward function on Csr format.
*/
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_forward(const std::string& op,
......@@ -231,7 +231,7 @@ void Edge_softmax_csr_forward(const std::string& op,
NDArray efeat,
NDArray out);
/*!
* \brief Edge_softmax_csr backward function on Csr format.
* @brief Edge_softmax_csr backward function on Csr format.
*/
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_backward(const std::string& op,
......
......@@ -43,7 +43,7 @@ int32_t Ver2partition(IdType in_val, int64_t *node_map, int32_t num_parts) {
}
/*!
* \brief Identifies the lead loaded partition/community for a given edge
 * @brief Identifies the least loaded partition/community for a given edge
* assignment.
*/
int32_t LeastLoad(int64_t *community_edges, int32_t nc) {
......@@ -66,23 +66,23 @@ int32_t LeastLoad(int64_t *community_edges, int32_t nc) {
}
/*!
* \brief Libra - vertexcut based graph partitioning.
* @brief Libra - vertexcut based graph partitioning.
 * It takes the list of edges from the input DGL graph and distributes them
 * among nc partitions. During edge distribution, Libra assigns a given edge
 * to a partition based on the end vertices; in doing so, it tries to minimize
 * the splitting of the graph vertices. In case of conflict Libra assigns an
 * edge to the least loaded partition/community.
* \param[in] nc Number of partitions/communities
* \param[in] node_degree per node degree
* \param[in] edgenum_unassigned node degree
* \param[out] community_weights weight of the created partitions
* \param[in] u src nodes
* \param[in] v dst nodes
* \param[out] w weight per edge
* \param[out] out partition assignment of the edges
* \param[in] N_n number of nodes in the input graph
* \param[in] N_e number of edges in the input graph
* \param[in] prefix output/partition storage location
* @param[in] nc Number of partitions/communities
* @param[in] node_degree per node degree
* @param[in] edgenum_unassigned node degree
* @param[out] community_weights weight of the created partitions
* @param[in] u src nodes
* @param[in] v dst nodes
* @param[out] w weight per edge
* @param[out] out partition assignment of the edges
* @param[in] N_n number of nodes in the input graph
* @param[in] N_e number of edges in the input graph
* @param[in] prefix output/partition storage location
*/
template <typename IdType, typename IdType2>
void LibraVertexCut(
......@@ -325,31 +325,31 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibraVertexCut")
});
/*!
* \brief
* @brief
* 1. Builds dictionary (ldt) for assigning local node IDs to nodes in the
* partitions
* 2. Builds dictionary (gdt) for storing copies (local ID) of split nodes
 * These dictionaries will be used in the subsequent stages to setup
* tracking of split nodes copies across the partition, setting up partition
* `ndata` dictionaries.
* \param[out] a local src node ID of an edge in a partition
* \param[out] b local dst node ID of an edge in a partition
* \param[-] indices temporary memory, keeps track of global node ID to local
* @param[out] a local src node ID of an edge in a partition
* @param[out] b local dst node ID of an edge in a partition
* @param[-] indices temporary memory, keeps track of global node ID to local
* node ID in a partition
* \param[out] ldt_key per partition dict for storing global and local node IDs
* @param[out] ldt_key per partition dict for storing global and local node IDs
* (consecutive)
* \param[out] gdt_key global dict for storing number of local nodes (or split
* @param[out] gdt_key global dict for storing number of local nodes (or split
* nodes) for a given global node ID
* \param[out] gdt_value global dict, stores local node IDs (due to split)
* @param[out] gdt_value global dict, stores local node IDs (due to split)
* across partitions for a given global node ID
* \param[out] node_map keeps track of range of local node IDs (consecutive)
* @param[out] node_map keeps track of range of local node IDs (consecutive)
* given to the nodes in the partitions
* \param[in, out] offset start of the range of local node IDs for this
* @param[in, out] offset start of the range of local node IDs for this
* partition
* \param[in] nc number of partitions/communities
* \param[in] c current partition number \param[in] fsize size of pre-allocated
* @param[in] nc number of partitions/communities
 * @param[in] c current partition number
 * @param[in] fsize size of pre-allocated
* memory tensor
* \param[in] prefix input Libra partition file location
* @param[in] prefix input Libra partition file location
*/
List<Value> Libra2dglBuildDict(
NDArray a, NDArray b, NDArray indices, NDArray ldt_key, NDArray gdt_key,
......@@ -457,14 +457,14 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibra2dglBuildDict")
});
/*!
* \brief sets up the 1-level tree among the clones of the split-nodes.
* \param[in] gdt_key global dict for assigning consecutive node IDs to nodes
* @brief sets up the 1-level tree among the clones of the split-nodes.
* @param[in] gdt_key global dict for assigning consecutive node IDs to nodes
* across all the partitions
* \param[in] gdt_value global dict for assigning consecutive node IDs to nodes
* @param[in] gdt_value global dict for assigning consecutive node IDs to nodes
* across all the partition
* \param[out] lrtensor keeps the root node ID of 1-level tree
* \param[in] nc number of partitions/communities
* \param[in] Nn number of nodes in the input graph
* @param[out] lrtensor keeps the root node ID of 1-level tree
* @param[in] nc number of partitions/communities
* @param[in] Nn number of nodes in the input graph
*/
void Libra2dglSetLR(
NDArray gdt_key, NDArray gdt_value, NDArray lrtensor, int32_t nc,
......@@ -507,35 +507,35 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibra2dglSetLR")
});
/*!
* \brief For each node in a partition, it creates a list of remote clone IDs;
* @brief For each node in a partition, it creates a list of remote clone IDs;
* also, for each node in a partition, it gathers the data (feats, label,
 * train, test) from input graph.
* \param[out] feat node features in current partition c.
* \param[in] gfeat input graph node features.
* \param[out] adj list of node IDs of remote clones.
* \param[out] inner_nodes marks whether a node is split or not.
* \param[in] ldt_key per partition dict for tracking global to local node IDs
* \param[out] gdt_key global dict for storing number of local nodes (or split
* @param[out] feat node features in current partition c.
* @param[in] gfeat input graph node features.
* @param[out] adj list of node IDs of remote clones.
* @param[out] inner_nodes marks whether a node is split or not.
* @param[in] ldt_key per partition dict for tracking global to local node IDs
* @param[out] gdt_key global dict for storing number of local nodes (or split
 * nodes) for a given global node ID
 * @param[out] gdt_value global
* dict, stores local node IDs (due to split) across partitions for
* a given global node ID.
* \param[in] node_map keeps track of range of local node IDs (consecutive)
* @param[in] node_map keeps track of range of local node IDs (consecutive)
* given to the nodes in the partitions.
* \param[out] lr 1-level tree marking for local split nodes.
* \param[in] lrtensor global (all the partitions) 1-level tree.
* \param[in] num_nodes number of nodes in current partition.
* \param[in] nc number of partitions/communities.
* \param[in] c current partition/community.
* \param[in] feat_size node feature vector size.
* \param[out] labels local (for this partition) labels.
* \param[out] trainm local (for this partition) training nodes.
* \param[out] testm local (for this partition) testing nodes.
* \param[out] valm local (for this partition) validation nodes.
* \param[in] glabels global (input graph) labels.
* \param[in] gtrainm glabal (input graph) training nodes.
* \param[in] gtestm glabal (input graph) testing nodes.
* \param[in] gvalm glabal (input graph) validation nodes.
* \param[out] Nn number of nodes in the input graph.
* @param[out] lr 1-level tree marking for local split nodes.
* @param[in] lrtensor global (all the partitions) 1-level tree.
* @param[in] num_nodes number of nodes in current partition.
* @param[in] nc number of partitions/communities.
* @param[in] c current partition/community.
* @param[in] feat_size node feature vector size.
* @param[out] labels local (for this partition) labels.
* @param[out] trainm local (for this partition) training nodes.
* @param[out] testm local (for this partition) testing nodes.
* @param[out] valm local (for this partition) validation nodes.
* @param[in] glabels global (input graph) labels.
 * @param[in] gtrainm global (input graph) training nodes.
 * @param[in] gtestm global (input graph) testing nodes.
 * @param[in] gvalm global (input graph) validation nodes.
* @param[out] Nn number of nodes in the input graph.
*/
template <typename IdType, typename IdType2, typename DType>
void Libra2dglBuildAdjlist(
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/selector.h
* \brief Selector functions to select among src/edge/dst attributes.
* @file array/selector.h
* @brief Selector functions to select among src/edge/dst attributes.
*/
#ifndef DGL_ARRAY_SELECTOR_H_
#define DGL_ARRAY_SELECTOR_H_
......@@ -23,8 +23,8 @@ namespace {
} // namespace
/*!
* \brief Select among src/edge/dst feature/idx.
* \note the integer argument target specifies which target
* @brief Select among src/edge/dst feature/idx.
* @note the integer argument target specifies which target
* to choose, 0: src, 1: edge, 2: dst.
*/
template <int target>
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/coo_union_partition.cc
* \brief COO union and partition
* @file array/cpu/coo_union_partition.cc
* @brief COO union and partition
*/
#include <dgl/array.h>
......
/*!
* Copyright (c) 2019-2022 by Contributors
* \file array/uvm_array.cc
* \brief DGL array utilities implementation
* @file array/uvm_array.cc
* @brief DGL array utilities implementation
*/
#include <dgl/array.h>
......
/*!
* Copyright (c) 2019-2022 by Contributors
* \file array/uvm_array_op.h
* \brief Array operator templates
* @file array/uvm_array_op.h
* @brief Array operator templates
*/
#ifndef DGL_ARRAY_UVM_ARRAY_OP_H_
#define DGL_ARRAY_UVM_ARRAY_OP_H_
......
/*!
* Copyright (c) 2020 by Contributors
* \file kernel/bcast.h
* \brief Broadcast related function implementations.
* @file kernel/bcast.h
* @brief Broadcast related function implementations.
*/
#include <dgl/bcast.h>
#include <dmlc/logging.h>
......@@ -12,7 +12,7 @@ namespace dgl {
namespace {
/*!
* \brief Determine whether use broadcasting or not, given the operator
* @brief Determine whether use broadcasting or not, given the operator
* type, lhs array and rhs array.
*/
bool UseBcast(const std::string& op, NDArray lhs, NDArray rhs) {
......@@ -28,9 +28,9 @@ bool UseBcast(const std::string& op, NDArray lhs, NDArray rhs) {
} // namespace
/*!
* \brief: Compute broadcast and auxiliary information given operator
* @brief: Compute broadcast and auxiliary information given operator
* and operands for kernel computation.
* \note: Expect lhs, rhs to have ndim >= 2 and the shape of lhs/rhs
* @note: Expect lhs, rhs to have ndim >= 2 and the shape of lhs/rhs
* valid for the op computation.
*/
BcastOff CalcBcastOff(const std::string& op, NDArray lhs, NDArray rhs) {
......
/*!
* Copyright (c) 2018 by Contributors
* \file c_runtime_api.cc
* \brief DGL C API common implementations
* @file c_runtime_api.cc
* @brief DGL C API common implementations
*/
#include "c_api_common.h"
......
/*!
* Copyright (c) 2018 by Contributors
* \file c_api_common.h
* \brief DGL C API common util functions
* @file c_api_common.h
* @brief DGL C API common util functions
*/
#ifndef DGL_C_API_COMMON_H_
#define DGL_C_API_COMMON_H_
......@@ -26,13 +26,13 @@ typedef void* CommunicatorHandle;
typedef void* KVMsgHandle;
/*!
* \brief Convert a vector of NDArray to PackedFunc.
* @brief Convert a vector of NDArray to PackedFunc.
*/
dgl::runtime::PackedFunc ConvertNDArrayVectorToPackedFunc(
const std::vector<dgl::runtime::NDArray>& vec);
/*!
* \brief Copy a vector to an NDArray.
* @brief Copy a vector to an NDArray.
*
* The data type of the NDArray will be IdType, which must be an integer type.
* The element type (DType) of the vector must be convertible to IdType.
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/cpu/geometry_op_impl.cc
* \brief Geometry operator CPU implementation
* @file array/cpu/geometry_op_impl.cc
* @brief Geometry operator CPU implementation
*/
#include <dgl/random.h>
......@@ -16,7 +16,7 @@ using runtime::NDArray;
namespace geometry {
namespace impl {
/*! \brief Knuth shuffle algorithm */
/*! @brief Knuth shuffle algorithm */
template <typename IdType>
void IndexShuffle(IdType *idxs, int64_t num_elems) {
for (int64_t i = num_elems - 1; i > 0; --i) {
......@@ -27,14 +27,14 @@ void IndexShuffle(IdType *idxs, int64_t num_elems) {
template void IndexShuffle<int32_t>(int32_t *idxs, int64_t num_elems);
template void IndexShuffle<int64_t>(int64_t *idxs, int64_t num_elems);
/*! \brief Groupwise index shuffle algorithm. This function will perform shuffle
/*! @brief Groupwise index shuffle algorithm. This function will perform shuffle
* in subarrays indicated by group index. The group index is similar to indptr
* in CSRMatrix.
*
* \param group_idxs group index array.
* \param idxs index array for shuffle.
* \param num_groups_idxs length of group_idxs
* \param num_elems length of idxs
* @param group_idxs group index array.
* @param idxs index array for shuffle.
* @param num_groups_idxs length of group_idxs
* @param num_elems length of idxs
*/
template <typename IdType>
void GroupIndexShuffle(
......@@ -77,7 +77,7 @@ IdArray GroupRandomPerm(
}
/*!
* \brief Farthest Point Sampler without the need to compute all pairs of
* @brief Farthest Point Sampler without the need to compute all pairs of
* distance.
*
* The input array has shape (N, d), where N is the number of points, and d is
......
/*!
* Copyright (c) 2019 by Contributors
* \file geometry/cuda/edge_coarsening_impl.cu
* \brief Edge coarsening CUDA implementation
* @file geometry/cuda/edge_coarsening_impl.cu
* @brief Edge coarsening CUDA implementation
*/
#include <curand_kernel.h>
#include <dgl/array.h>
......@@ -109,7 +109,7 @@ __global__ void weighted_respond_kernel(
}
}
/*! \brief The colorize procedure. This procedure randomly marks unmarked
/*! @brief The colorize procedure. This procedure randomly marks unmarked
* nodes with BLUE(-1) and RED(-2) and checks whether the node matching
* process has finished.
*/
......@@ -137,7 +137,7 @@ bool Colorize(IdType *result_data, int64_t num_nodes, float *const prop) {
return done_h;
}
/*! \brief Weighted neighbor matching procedure (GPU version).
/*! @brief Weighted neighbor matching procedure (GPU version).
* This implementation is from `A GPU Algorithm for Greedy Graph Matching
* <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
*
......@@ -198,7 +198,7 @@ template void WeightedNeighborMatching<kDGLCUDA, double, int32_t>(
template void WeightedNeighborMatching<kDGLCUDA, double, int64_t>(
const aten::CSRMatrix &csr, const NDArray weight, IdArray result);
/*! \brief Unweighted neighbor matching procedure (GPU version).
/*! @brief Unweighted neighbor matching procedure (GPU version).
* Instead of directly sample neighbors, we assign each neighbor
* with a random weight. We use random weight for 2 reasons:
* 1. Random sample for each node in GPU is expensive. Although
......
/*!
* Copyright (c) 2019 by Contributors
* \file geometry/cuda/geometry_op_impl.cc
* \brief Geometry operator CUDA implementation
* @file geometry/cuda/geometry_op_impl.cc
* @brief Geometry operator CUDA implementation
*/
#include <dgl/array.h>
......@@ -16,7 +16,7 @@ namespace geometry {
namespace impl {
/*!
* \brief Farthest Point Sampler without the need to compute all pairs of
* @brief Farthest Point Sampler without the need to compute all pairs of
* distance.
*
* The input array has shape (N, d), where N is the number of points, and d is
......
/*!
* Copyright (c) 2019 by Contributors
* \file geometry/geometry.cc
* \brief DGL geometry utilities implementation
* @file geometry/geometry.cc
* @brief DGL geometry utilities implementation
*/
#include <dgl/array.h>
#include <dgl/base_heterograph.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file geometry/geometry_op.h
* \brief Geometry operator templates
* @file geometry/geometry_op.h
* @brief Geometry operator templates
*/
#ifndef DGL_GEOMETRY_GEOMETRY_OP_H_
#define DGL_GEOMETRY_GEOMETRY_OP_H_
......@@ -17,7 +17,7 @@ void FarthestPointSampler(
NDArray array, int64_t batch_size, int64_t sample_points, NDArray dist,
IdArray start_idx, IdArray result);
/*! \brief Implementation of weighted neighbor matching process of edge
/*! @brief Implementation of weighted neighbor matching process of edge
* coarsening used in Metis and Graclus for homogeneous graph coarsening. This
* procedure keeps picking an unmarked vertex and matching it with one its
* unmarked neighbors (that maximizes its edge weight) until no match can be
......@@ -27,7 +27,7 @@ template <DGLDeviceType XPU, typename FloatType, typename IdType>
void WeightedNeighborMatching(
const aten::CSRMatrix &csr, const NDArray weight, IdArray result);
/*! \brief Implementation of neighbor matching process of edge coarsening used
/*! @brief Implementation of neighbor matching process of edge coarsening used
* in Metis and Graclus for homogeneous graph coarsening. This procedure keeps
* picking an unmarked vertex and matching it with one its unmarked neighbors
* (that maximizes its edge weight) until no match can be done.
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/creators.cc
* \brief Functions for constructing graphs.
* @file graph/creators.cc
* @brief Functions for constructing graphs.
*/
#include "./heterograph.h"
using namespace dgl::runtime;
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/gk_ops.cc
* \brief Graph operation implemented in GKlib
* @file graph/gk_ops.cc
* @brief Graph operation implemented in GKlib
*/
#if !defined(_WIN32)
......@@ -17,9 +17,9 @@ namespace dgl {
/*!
* Convert DGL CSR to GKLib CSR.
* GKLib CSR actually stores a CSR object and a CSC object of a graph.
* \param mat the DGL CSR matrix.
* \param is_row the input DGL matrix is CSR or CSC.
* \return a GKLib CSR.
* @param mat the DGL CSR matrix.
* @param is_row the input DGL matrix is CSR or CSC.
* @return a GKLib CSR.
*/
gk_csr_t *Convert2GKCsr(const aten::CSRMatrix mat, bool is_row) {
// TODO(zhengda) The conversion will be zero-copy in the future.
......@@ -63,9 +63,9 @@ gk_csr_t *Convert2GKCsr(const aten::CSRMatrix mat, bool is_row) {
/*!
* Convert GKLib CSR to DGL CSR.
* GKLib CSR actually stores a CSR object and a CSC object of a graph.
* \param gk_csr the GKLib CSR.
* \param is_row specify whether to convert the CSR or CSC object of GKLib CSR.
* \return a DGL CSR matrix.
* @param gk_csr the GKLib CSR.
* @param is_row specify whether to convert the CSR or CSC object of GKLib CSR.
* @return a DGL CSR matrix.
*/
aten::CSRMatrix Convert2DGLCsr(gk_csr_t *gk_csr, bool is_row) {
// TODO(zhengda) The conversion will be zero-copy in the future.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment