Unverified Commit bcd37684 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* balbla

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
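
For context: both markers open a Doxygen documentation block, so the change below is purely stylistic. It swaps the Qt-style `/*!` opener for the Javadoc-style `/**` opener and leaves the `@file`/`@brief`/`@param` tags untouched. A minimal before/after illustration (hypothetical function, not taken from the diff):

// Before this commit, documentation blocks opened with the Qt-style marker:
/*!
 * @brief Adds two integers (hypothetical example, not from the diff).
 */
int AddQtStyle(int a, int b) { return a + b; }

// After this commit, the same block opens with the Javadoc-style marker;
// Doxygen treats both spellings as documentation comments.
/**
 * @brief Adds two integers (hypothetical example, not from the diff).
 */
int AddJavadocStyle(int a, int b) { return a + b; }
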
-/*!
+/**
 * Copyright (c) 2022 by Contributors
 * @file array/cpu/gather_mm.h
 * @brief GATHER_MM CPU kernel function header.
@@ -45,7 +45,7 @@ void matmul(
  }
}
-/*!
+/**
 * @brief CPU kernel of Gather_mm. The input matrix A is expected to be
 * sorted according to relation type.
 * @param A The input dense matrix of dimension m x k
...
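
As background for the block above: with A sorted by relation type, a gather-mm style kernel can walk A segment by segment and multiply each segment with the weight matrix of its relation. A rough standalone sketch of that idea (plain C++, hypothetical shapes and names, not the DGL kernel):

#include <cstddef>
#include <vector>

// Hypothetical sketch: A is (m x k) with rows sorted by relation type,
// seglen[r] is the number of consecutive rows belonging to relation r,
// W[r] is a flat (k x n) weight matrix per relation, C is the (m x n) output.
void SegmentMatmulSketch(const std::vector<double>& A, std::size_t k,
                         const std::vector<std::vector<double>>& W, std::size_t n,
                         const std::vector<std::size_t>& seglen,
                         std::vector<double>* C) {
  std::size_t row = 0;
  for (std::size_t r = 0; r < seglen.size(); ++r) {       // one relation at a time
    for (std::size_t s = 0; s < seglen[r]; ++s, ++row) {   // rows of this segment
      for (std::size_t j = 0; j < n; ++j) {
        double acc = 0.0;
        for (std::size_t t = 0; t < k; ++t)
          acc += A[row * k + t] * W[r][t * n + j];          // C[row] = A[row] * W[r]
        (*C)[row * n + j] = acc;
      }
    }
  }
}
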
-/*!
+/**
 * Copyright (c) 2021 by Contributors
 * @file array/cpu/negative_sampling.cc
 * @brief Uniform negative sampling on CSR.
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/rowwise_pick.h
 * @brief Template implementation for rowwise pick operators.
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/rowwise_sampling.cc
 * @brief rowwise sampling
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/rowwise_topk.cc
 * @brief rowwise topk
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file aten/cpu/sddmm.cc
 * @brief SDDMM C APIs and definitions.
@@ -42,7 +42,7 @@ namespace aten {
} while (0)
-/*! @brief Generalized SDDMM on Csr format. */
+/** @brief Generalized SDDMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SDDMMCsr(const std::string& op,
    const BcastOff& bcast,
@@ -59,7 +59,7 @@ void SDDMMCsr(const std::string& op,
  });
}
-/*! @brief Generalized SDDMM on Csr format with Heterograph support. */
+/** @brief Generalized SDDMM on Csr format with Heterograph support. */
template <int XPU, typename IdType, typename DType>
void SDDMMCsrHetero(const std::string& op,
    const BcastOff& bcast,
@@ -131,7 +131,7 @@ template void SDDMMCsrHetero<kDGLCPU, int64_t, double>(
    const std::vector<dgl_type_t>& in_eid,
    const std::vector<dgl_type_t>& out_eid);
-/*! @brief Generalized SDDMM on Coo format. */
+/** @brief Generalized SDDMM on Coo format. */
template <int XPU, typename IdType, typename DType>
void SDDMMCoo(const std::string& op,
    const BcastOff& bcast,
@@ -148,7 +148,7 @@ void SDDMMCoo(const std::string& op,
  });
}
-/*! @brief Generalized SDDMM on Coo format with Heterograph support. */
+/** @brief Generalized SDDMM on Coo format with Heterograph support. */
template <int XPU, typename IdType, typename DType>
void SDDMMCooHetero(const std::string& op,
    const BcastOff& bcast,
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/sddmm.h
 * @brief SDDMM CPU kernel function header.
@@ -16,7 +16,7 @@ namespace dgl {
namespace aten {
namespace cpu {
-/*!
+/**
 * @brief CPU kernel of g-SDDMM on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -68,7 +68,7 @@ void SDDMMCsr(
  });
}
-/*!
+/**
 * @brief CPU kernel of g-SDDMM on Coo format.
 * @param bcast Broadcast information.
 * @param coo The COO matrix.
...
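
For readers unfamiliar with the operator documented above: SDDMM (sampled dense-dense matrix multiplication) evaluates a binary op only at the nonzero positions of the sparse matrix, i.e. for each edge (u, v) it combines the source and destination features. A minimal COO-based sketch of the "dot" case (hypothetical standalone code, not DGL's templated kernel):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch of SDDMM with the "dot" op on a COO matrix:
// out[e] = <ufeat[row[e]], vfeat[col[e]]> for every nonzero e.
void SddmmDotCooSketch(const std::vector<int64_t>& row, const std::vector<int64_t>& col,
                       const std::vector<float>& ufeat, const std::vector<float>& vfeat,
                       int64_t feat_len, std::vector<float>* out) {
  for (std::size_t e = 0; e < row.size(); ++e) {
    float acc = 0.f;
    for (int64_t d = 0; d < feat_len; ++d)
      acc += ufeat[row[e] * feat_len + d] * vfeat[col[e] * feat_len + d];
    (*out)[e] = acc;  // one scalar per edge / nonzero
  }
}
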
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file kernel/cpu/segment_reduce.cc
 * @brief Segment reduce C APIs and definitions.
@@ -11,7 +11,7 @@
namespace dgl {
namespace aten {
-/*! @brief Segment Reduce operator. */
+/** @brief Segment Reduce operator. */
template <int XPU, typename IdType, typename DType>
void SegmentReduce(
    const std::string& op,
@@ -34,7 +34,7 @@ void SegmentReduce(
  }
}
-/*! @brief Scatter Add.*/
+/** @brief Scatter Add.*/
template <int XPU, typename IdType, typename DType>
void ScatterAdd(NDArray feat,
    NDArray idx,
@@ -42,7 +42,7 @@ void ScatterAdd(NDArray feat,
  cpu::ScatterAdd<IdType, DType>(feat, idx, out);
}
-/*! @brief Update gradients for reduce operator max/min on heterogeneous graph.*/
+/** @brief Update gradients for reduce operator max/min on heterogeneous graph.*/
template <int XPU, typename IdType, typename DType>
void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
    const std::string& op,
@@ -53,7 +53,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g,
  cpu::UpdateGradMinMax_hetero<IdType, DType>(g, op, feat, idx, idx_etype, out);
}
-/*! @brief Backward function of segment cmp.*/
+/** @brief Backward function of segment cmp.*/
template <int XPU, typename IdType, typename DType>
void BackwardSegmentCmp(
    NDArray feat,
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/spmm.h
 * @brief Segment reduce kernel function header.
@@ -17,7 +17,7 @@ namespace dgl {
namespace aten {
namespace cpu {
-/*!
+/**
 * @brief CPU kernel of segment sum.
 * @param feat The input tensor.
 * @param offsets The offset tensor storing the ranges of segments.
@@ -42,7 +42,7 @@ void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
  });
}
-/*!
+/**
 * @brief CPU kernel of segment min/max.
 * @param feat The input tensor.
 * @param offsets The offset tensor storing the ranges of segments.
@@ -76,7 +76,7 @@ void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) {
  });
}
-/*!
+/**
 * @brief CPU kernel of Scatter Add (on first dimension) operator.
 * @note math equation: out[idx[i], *] += feat[i, *]
 * @param feat The input tensor.
@@ -101,7 +101,7 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) {
  }
}
-/*!
+/**
 * @brief CPU kernel to update gradients for reduce op max/min
 * @param graph The input heterogeneous graph.
 * @param op The binary operator, could be `copy_u`, `copy_e'.
@@ -159,7 +159,7 @@ void UpdateGradMinMax_hetero(
  }
}
-/*!
+/**
 * @brief CPU kernel of backward phase of segment min/max.
 * @note math equation: out[arg[i, k], k] = feat[i, k]
 * @param feat The input tensor.
...
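
The @note above pins down the scatter-add semantics: row i of feat is accumulated into row idx[i] of out. A tiny standalone illustration of exactly that equation (hypothetical code, single-threaded, unlike the OpenMP kernel it documents):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch of out[idx[i], *] += feat[i, *] on flat row-major buffers.
void ScatterAddSketch(const std::vector<float>& feat, const std::vector<int64_t>& idx,
                      int64_t dim, std::vector<float>* out) {
  const std::size_t n_rows = idx.size();
  for (std::size_t i = 0; i < n_rows; ++i)
    for (int64_t d = 0; d < dim; ++d)
      (*out)[idx[i] * dim + d] += feat[i * dim + d];  // accumulate row i into row idx[i]
}
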
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file array/cpu/spmat_op_impl.cc
 * @brief CPU implementation of COO sparse matrix operators
@@ -22,7 +22,7 @@ using runtime::parallel_for;
namespace aten {
namespace impl {
-/*
+/**
 * TODO(BarclayII):
 * For row-major sorted COOs, we have faster implementation with binary search,
 * sorted search, etc. Later we should benchmark how much we can gain with
@@ -624,7 +624,7 @@ CSRMatrix UnSortedDenseCOOToCSR(const COOMatrix &coo) {
}  // namespace
-/*
+/**
 Implementation and Complexity details. N: num_nodes, NNZ: num_edges, P:
 num_threads.
 1. If row is sorted in COO, SortedCOOToCSR<> is applied. Time: O(NNZ/P).
...
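
The complexity note above refers to the sorted-COO path: when the row array is already sorted, the CSR row-pointer array can be built in a single pass over the nonzeros. A serial sketch of that pass (hypothetical helper; the real implementation parallelizes it to reach the quoted O(NNZ/P)):

#include <cstdint>
#include <vector>

// Hypothetical sketch: given a COO row array and the number of rows,
// produce the CSR row-pointer array (indptr) in one O(NNZ) counting pass.
std::vector<int64_t> CooToCsrIndptrSketch(const std::vector<int64_t>& row,
                                          int64_t num_rows) {
  std::vector<int64_t> indptr(num_rows + 1, 0);
  for (int64_t r : row) ++indptr[r + 1];  // count nonzeros per row
  for (int64_t i = 0; i < num_rows; ++i)
    indptr[i + 1] += indptr[i];           // prefix-sum counts into row offsets
  return indptr;                          // if the COO is row-sorted, column data can be reused as-is
}
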
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file array/cpu/spmat_op_impl_csr.cc
 * @brief CSR matrix operator CPU implementation
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file kernel/cpu/spmm.cc
 * @brief SPMM C APIs and definitions.
@@ -9,7 +9,7 @@
namespace dgl {
namespace aten {
-/*! @brief Generalized SpMM on Csr format. */
+/** @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SpMMCsr(const std::string& op, const std::string& reduce,
    const BcastOff& bcast,
@@ -41,7 +41,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce,
  }
}
-/*! @brief Generalized SpMM on Csr format. */
+/** @brief Generalized SpMM on Csr format. */
template <int XPU, typename IdType, typename DType>
void SpMMCsrHetero(const std::string& op, const std::string& reduce,
    const BcastOff& bcast,
@@ -159,7 +159,7 @@ template void SpMMCsrHetero<kDGLCPU, int64_t, double>(
    const std::vector<dgl_type_t>& ufeat_node_tids,
    const std::vector<dgl_type_t>& out_node_tids);
-/*! @brief Edge_softmax_csr forward op on Csr format. */
+/** @brief Edge_softmax_csr forward op on Csr format. */
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_forward(const std::string& op,
    const BcastOff& bcast,
@@ -172,7 +172,7 @@ void Edge_softmax_csr_forward(const std::string& op,
  });
}
-/*! @brief Edge_softmax_csr backward op on Csr format. */
+/** @brief Edge_softmax_csr backward op on Csr format. */
template <int XPU, typename IdType, typename DType>
void Edge_softmax_csr_backward(const std::string& op,
    const BcastOff& bcast,
@@ -219,7 +219,7 @@ template void Edge_softmax_csr_backward<kDGLCPU, int64_t, double>(
    const BcastOff& bcast, const CSRMatrix& csr,
    NDArray ufeat, NDArray efeat, NDArray out);
-/*! @brief Generalized SpMM on Coo format. */
+/** @brief Generalized SpMM on Coo format. */
template <int XPU, typename IdType, typename DType>
void SpMMCoo(const std::string& op, const std::string& reduce,
    const BcastOff& bcast,
...
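
Edge_softmax_csr_forward, documented above, normalizes edge scores with a softmax taken over each node's incident edges; a CSR layout makes this convenient because each row's nonzeros are exactly one node's edges. A numerically stabilized serial sketch (hypothetical standalone code, scalar scores only):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch: softmax over the edge scores of each CSR row.
// indptr has num_rows + 1 entries; score and out have one entry per edge.
void EdgeSoftmaxCsrSketch(const std::vector<int64_t>& indptr,
                          const std::vector<float>& score, std::vector<float>* out) {
  for (std::size_t r = 0; r + 1 < indptr.size(); ++r) {
    int64_t beg = indptr[r], end = indptr[r + 1];
    if (beg == end) continue;  // node with no edges: nothing to normalize
    float max_v = score[beg];
    for (int64_t e = beg; e < end; ++e) max_v = std::max(max_v, score[e]);
    float sum = 0.f;
    for (int64_t e = beg; e < end; ++e) sum += std::exp(score[e] - max_v);
    for (int64_t e = beg; e < end; ++e)
      (*out)[e] = std::exp(score[e] - max_v) / sum;  // softmax within the row
  }
}
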
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/spmm.h
 * @brief SPMM CPU kernel function header.
@@ -32,7 +32,7 @@ namespace cpu {
#if !defined(_WIN32)
#ifdef USE_AVX
-/*!
+/**
 * @brief CPU kernel of SpMM on Csr format using Xbyak.
 * @param cpu_spec JIT'ed kernel
 * @param bcast Broadcast information.
@@ -69,7 +69,7 @@ void SpMMSumCsrXbyak(
#endif  // USE_AVX
#endif  // _WIN32
-/*!
+/**
 * @brief Naive CPU kernel of SpMM on Csr format.
 * @param cpu_spec JIT'ed kernel
 * @param bcast Broadcast information.
@@ -110,7 +110,7 @@ void SpMMSumCsrNaive(
  });
}
-/*!
+/**
 * @brief CPU kernel of SpMM on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -176,7 +176,7 @@ void SpMMSumCsr(
#endif  // _WIN32
}
-/*!
+/**
 * @brief CPU kernel of SpMM on Coo format.
 * @param bcast Broadcast information.
 * @param coo The Coo matrix.
@@ -225,7 +225,7 @@ void SpMMSumCoo(
  }
}
-/*!
+/**
 * @brief CPU kernel of SpMM-Min/Max on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -322,7 +322,7 @@ void SpMMCmpCsr(
#endif  // _WIN32
}
-/*!
+/**
 * @brief CPU kernel of SpMM-Min/Max on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -418,7 +418,7 @@ void SpMMCmpCsrHetero(
  });
}
-/*!
+/**
 * @brief CPU kernel of SpMM-Min/Max on Coo format.
 * @param bcast Broadcast information.
 * @param coo The Coo matrix.
@@ -484,7 +484,7 @@ void SpMMCmpCoo(
  }
}
-/*!
+/**
 * @brief CPU kernel of Edge_softmax_csr_forward on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -532,7 +532,7 @@ void Edge_softmax_csr_forward(
  });
}
-/*!
+/**
 * @brief CPU kernel of Edge_softmax_csr_backward on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
...
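
SpMMSumCsr and its variants above implement message aggregation: for every row of the CSR matrix, features of the neighboring (column) nodes, optionally combined with edge features, are reduced into the output row. A serial copy-sum sketch that treats each row as the aggregating node (hypothetical code, ignoring broadcasting and edge features):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch of SpMM-Sum on CSR with the copy_u message:
// out[r, :] = sum over nonzeros (r, c) of ufeat[c, :].
void SpmmSumCsrSketch(const std::vector<int64_t>& indptr, const std::vector<int64_t>& indices,
                      const std::vector<float>& ufeat, int64_t dim, std::vector<float>* out) {
  for (std::size_t r = 0; r + 1 < indptr.size(); ++r)
    for (int64_t e = indptr[r]; e < indptr[r + 1]; ++e) {
      int64_t c = indices[e];                       // neighbor on the column side
      for (int64_t d = 0; d < dim; ++d)
        (*out)[r * dim + d] += ufeat[c * dim + d];  // accumulate neighbor features into row r
    }
}
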
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/spmm_binary_ops.h
 * @brief SPMM CPU Binary ops.
...
-/*!
+/**
 * Copyright (c) 2021 Intel Corporation
 * @file array/cpu/spmm.h
 * @brief SPMM CPU kernel function header.
@@ -48,7 +48,7 @@ int32_t GetLLCSize() {
  return cache_size;
}
-/*!
+/**
 * @brief Tile the CSR matrix to roughly make sure that the column tiles and
 * corresponding neighbor features fit into LLC and the row tiles
 * are assigned to OMP threads.
@@ -165,7 +165,7 @@ inline void SpMMCreateBlocks(
  }
}
-/*!
+/**
 * @brief Create libxsmm kernel.
 * @param has_idx For the edge features, are there indices available.
 * @param N Feature size.
@@ -269,7 +269,7 @@ inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel(
  return kernel;
}
-/*!
+/**
 * @brief Use libxsmm to perform SpMM-Sum on all blocks.
 * @param block_csr_array The array containing csr matrices of all blocks.
 * @param B The feature on source nodes.
@@ -326,7 +326,7 @@ inline void SpMMBlockwiseOpSum(
  }
}
-/*!
+/**
 * @brief Use libxsmm to perform SpMM-Max/Min on all blocks.
 * @param block_csr_array The array containing csr matrices of all blocks.
 * @param B The feature on source nodes.
@@ -390,7 +390,7 @@ inline void SpMMBlockwiseOpCmp(
  }
}
-/*!
+/**
 * @brief Free the tiled CSR matrix data.
 * @param block_csr_array The array containing csr matrices of all blocks.
 * @param num_M_blocks Number of blocks to create along the rows of adjacency
@@ -412,7 +412,7 @@ inline void SpMMFreeBlocks(
  free(block_csr_array);
}
-/*!
+/**
 * @brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -550,7 +550,7 @@ void SpMMRedopCsrOpt(
#endif  // DEBUG
}
-/*!
+/**
 * @brief Optimized CPU kernel of SpMM-Sum on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
@@ -568,7 +568,7 @@ void SpMMSumCsrLibxsmm(
    bcast, csr, ufeat, efeat, out, dummy, dummy);
}
-/*!
+/**
 * @brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
 * @param bcast Broadcast information.
 * @param csr The Csr matrix.
...
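
The SpMMCreateBlocks documentation above describes a cache-blocking heuristic: choose enough column blocks that one block of source-node features fits in the last-level cache, and enough row blocks to keep every OpenMP thread busy. A back-of-the-envelope sketch of that sizing decision (hypothetical formula and names, not the actual heuristic in the file; assumes feat_len > 0):

#include <algorithm>
#include <cstdint>

// Hypothetical sketch: pick block counts so that a column tile of source-node
// features fits in the LLC and row tiles map one-to-one onto OMP threads.
struct BlockPlan {
  int64_t num_row_blocks;  // tiles along the rows (destination nodes)
  int64_t num_col_blocks;  // tiles along the columns (source nodes)
};

BlockPlan PlanSpmmBlocksSketch(int64_t num_rows, int64_t num_cols, int64_t feat_len,
                               int64_t elem_bytes, int64_t llc_bytes, int64_t num_threads) {
  const int64_t bytes_per_col = feat_len * elem_bytes;                 // one source node's feature row
  const int64_t cols_per_block = std::max<int64_t>(1, llc_bytes / bytes_per_col);
  BlockPlan plan;
  plan.num_col_blocks = (num_cols + cols_per_block - 1) / cols_per_block;
  plan.num_row_blocks = std::min(num_rows, num_threads);               // one row tile per thread
  return plan;
}
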
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/traversal.cc
 * @brief Graph traversal implementation
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/traversal.h
 * @brief Graph traversal routines.
@@ -21,7 +21,7 @@ namespace dgl {
namespace aten {
namespace impl {
-/*!
+/**
 * @brief Traverse the graph in a breadth-first-search (BFS) order.
 *
 * The queue object must suffice following interface:
@@ -85,7 +85,7 @@ void BFSTraverseNodes(
  }
}
-/*!
+/**
 * @brief Traverse the graph in a breadth-first-search (BFS) order, returning
 * the edges of the BFS tree.
 *
@@ -153,7 +153,7 @@ void BFSTraverseEdges(
  }
}
-/*!
+/**
 * @brief Traverse the graph in topological order.
 *
 * The queue object must suffice following interface:
@@ -226,13 +226,13 @@ void TopologicalNodes(
  }
}
-/*!\brief Tags for ``DFSEdges``. */
+/** @brief Tags for ``DFSEdges``. */
enum DFSEdgeTag {
  kForward = 0,
  kReverse,
  kNonTree,
};
-/*!
+/**
 * @brief Traverse the graph in a depth-first-search (DFS) order.
 *
 * The traversal visit edges in its DFS order. Edges have three tags:
...
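
BFSTraverseNodes above is written against a generic queue/visit interface; the underlying algorithm is ordinary frontier-by-frontier BFS. A compact standalone sketch over a CSR adjacency (hypothetical code, single source, no tag bookkeeping):

#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical sketch of BFS over a CSR adjacency, expanding one frontier at a time.
std::vector<std::vector<int64_t>> BfsFrontiersSketch(const std::vector<int64_t>& indptr,
                                                     const std::vector<int64_t>& indices,
                                                     int64_t source) {
  std::vector<bool> visited(indptr.size() - 1, false);
  std::vector<std::vector<int64_t>> frontiers{{source}};
  visited[source] = true;
  while (true) {
    std::vector<int64_t> next;
    for (int64_t u : frontiers.back())
      for (int64_t e = indptr[u]; e < indptr[u + 1]; ++e) {
        int64_t v = indices[e];
        if (!visited[v]) { visited[v] = true; next.push_back(v); }  // first time v is reached
      }
    if (next.empty()) break;  // no new nodes: traversal finished
    frontiers.push_back(std::move(next));
  }
  return frontiers;           // frontiers[k] holds the nodes at BFS depth k
}
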
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/array_cumsum.cu
 * @brief Array cumsum GPU implementation
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file array/cpu/array_index_select.cu
 * @brief Array index select GPU implementation
...
-/*!
+/**
 * Copyright (c) 2021-2022 by Contributors
 * @file array/cuda/array_index_select.cuh
 * @brief Array index select GPU kernel implementation
...