Unverified commit 619d735d, authored by Hongzhi (Steve), Chen and committed by GitHub

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*!
* Copyright (c) 2020-2022 by Contributors
* \file array/tensordispatch.h
* \brief This file defines the dispatcher of tensor operators to
* @file array/tensordispatch.h
* @brief This file defines the dispatcher of tensor operators to
* framework-specific implementations.
*
* The dispatcher consists of a TensorDispatcher singleton in DGL C library and
......@@ -39,7 +39,7 @@
#include "ndarray.h"
/*!
* \brief Casts a pointer \c entry to a function pointer with the signature of \c
* @brief Casts a pointer \c entry to a function pointer with the signature of \c
* func.
*/
#define FUNCCAST(func, entry) (*reinterpret_cast<decltype(&(func))>(entry))
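// Editorial sketch, not part of the original header: FUNCCAST recovers a
// typed, callable function pointer from the untyped symbol addresses the
// dispatcher stores. The adapter symbol name "TARawAlloc" is hypothetical.
#include <dlfcn.h>  // dlsym(); POSIX, assumed for this sketch

inline void* ReferenceAlloc(size_t) { return nullptr; }  // signature donor

inline void* AllocViaAdapter(void* lib_handle, size_t nbytes) {
  void* entry = dlsym(lib_handle, "TARawAlloc");  // hypothetical symbol
  if (entry == nullptr) return nullptr;
  // decltype(&ReferenceAlloc) supplies the type that FUNCCAST casts entry to.
  return FUNCCAST(ReferenceAlloc, entry)(nbytes);
}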
......@@ -48,31 +48,31 @@ namespace dgl {
namespace runtime {
/*!
* \brief Dispatcher that delegates the function calls to framework-specific C++
* @brief Dispatcher that delegates the function calls to framework-specific C++
* APIs.
*
* This class is not thread-safe.
*/
class TensorDispatcher {
public:
/*! \brief Get the singleton instance. */
/*! @brief Get the singleton instance. */
static TensorDispatcher* Global() {
static TensorDispatcher inst;
return &inst;
}
/*! \brief Whether an adapter library is available. */
/*! @brief Whether an adapter library is available. */
inline bool IsAvailable() { return available_; }
/*! \brief Load symbols from the given tensor adapter library path. */
/*! @brief Load symbols from the given tensor adapter library path. */
bool Load(const char* path_cstr);
/*!
* \brief Allocate a piece of CPU memory via PyTorch's CPUAllocator.
* @brief Allocate a piece of CPU memory via PyTorch's CPUAllocator.
* Used in CPUDeviceAPI::AllocWorkspace().
*
* \param nbytes The size to be allocated.
* \return Pointer to the allocated memory.
* @param nbytes The size to be allocated.
* @return Pointer to the allocated memory.
*/
inline void* CPUAllocWorkspace(size_t nbytes) {
auto entry = entrypoints_[Op::kCPURawAlloc];
......@@ -80,10 +80,10 @@ class TensorDispatcher {
}
/*!
* \brief Free the CPU memory.
* @brief Free the CPU memory.
* Used in CPUDeviceAPI::FreeWorkspace().
*
* \param ptr Pointer to the memory to be freed.
* @param ptr Pointer to the memory to be freed.
*/
inline void CPUFreeWorkspace(void* ptr) {
auto entry = entrypoints_[Op::kCPURawDelete];
......@@ -92,17 +92,17 @@ class TensorDispatcher {
#ifdef DGL_USE_CUDA
/*!
* \brief Allocate a piece of GPU memory via
* @brief Allocate a piece of GPU memory via
* PyTorch's THCCachingAllocator.
* Used in CUDADeviceAPI::AllocWorkspace().
*
* \note THCCachingAllocator specifies the device to allocate on
* @note THCCachingAllocator specifies the device to allocate on
* via cudaGetDevice(). Make sure to call cudaSetDevice()
* before invoking this function.
*
* \param nbytes The size to be allocated.
* \param stream The stream to be allocated on.
* \return Pointer to the allocated memory.
* @param nbytes The size to be allocated.
* @param stream The stream to be allocated on.
* @return Pointer to the allocated memory.
*/
inline void* CUDAAllocWorkspace(size_t nbytes, cudaStream_t stream) {
auto entry = entrypoints_[Op::kCUDARawAlloc];
......@@ -110,10 +110,10 @@ class TensorDispatcher {
}
/*!
* \brief Free the GPU memory.
* @brief Free the GPU memory.
* Used in CUDADeviceAPI::FreeWorkspace().
*
* \param ptr Pointer to the memory to be freed.
* @param ptr Pointer to the memory to be freed.
*/
inline void CUDAFreeWorkspace(void* ptr) {
auto entry = entrypoints_[Op::kCUDARawDelete];
......@@ -121,14 +121,14 @@ class TensorDispatcher {
}
/*!
* \brief Find the current PyTorch CUDA stream.
* @brief Find the current PyTorch CUDA stream.
* Used in runtime::getCurrentCUDAStream().
*
* \note PyTorch pre-allocates/sets the current CUDA stream
* @note PyTorch pre-allocates/sets the current CUDA stream
* on the current device via cudaGetDevice(). Make sure to call cudaSetDevice()
* before invoking this function.
*
* \return cudaStream_t stream handle
* @return cudaStream_t stream handle
*/
inline cudaStream_t CUDAGetCurrentStream() {
auto entry = entrypoints_[Op::kCUDACurrentStream];
......@@ -137,12 +137,12 @@ class TensorDispatcher {
#endif // DGL_USE_CUDA
/*!
* \brief Record streams that are using this tensor.
* @brief Record streams that are using this tensor.
* Used in NDArray::RecordStream().
*
* \param ptr Pointer to the tensor to be recorded.
* \param stream The stream that is using this tensor.
* \param device_id Device of the tensor.
* @param ptr Pointer to the tensor to be recorded.
* @param stream The stream that is using this tensor.
* @param device_id Device of the tensor.
*/
inline void RecordStream(void* ptr, DGLStreamHandle stream, int device_id) {
#ifdef DGL_USE_CUDA
......@@ -153,13 +153,13 @@ class TensorDispatcher {
}
private:
/*! \brief ctor */
/*! @brief ctor */
TensorDispatcher() = default;
/*! \brief dtor */
/*! @brief dtor */
~TensorDispatcher();
/*!
* \brief List of symbols in the adapter library.
* @brief List of symbols in the adapter library.
*
* Must match the functions in tensoradapter/include/tensoradapter.h.
*/
......@@ -170,7 +170,7 @@ class TensorDispatcher {
#endif // DGL_USE_CUDA
};
/*! \brief Index of each function in the symbol list */
/*! @brief Index of each function in the symbol list */
class Op {
public:
static constexpr int kCPURawAlloc = 0;
......@@ -183,10 +183,10 @@ class TensorDispatcher {
#endif // DGL_USE_CUDA
};
/*! \brief Number of functions */
/*! @brief Number of functions */
static constexpr int num_entries_ = sizeof(names_) / sizeof(names_[0]);
/*! \brief Entrypoints of each function */
/*! @brief Entrypoints of each function */
void* entrypoints_[num_entries_] = {
nullptr, nullptr,
#ifdef DGL_USE_CUDA
......
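As a usage illustration (editorial addition, not part of the diff): callers are
expected to consult the dispatcher and fall back to their own allocator when no
adapter library is loaded. The std::malloc/std::free fallback below is an
assumption made for this sketch, not necessarily what CPUDeviceAPI does.

#include <cstddef>  // size_t
#include <cstdlib>  // std::malloc, std::free

void* ExampleAllocWorkspace(size_t nbytes) {
  auto* td = dgl::runtime::TensorDispatcher::Global();
  if (td->IsAvailable())  // true once Load() found the adapter symbols
    return td->CPUAllocWorkspace(nbytes);
  return std::malloc(nbytes);  // assumed fallback, for illustration only
}

void ExampleFreeWorkspace(void* ptr) {
  auto* td = dgl::runtime::TensorDispatcher::Global();
  if (td->IsAvailable())
    td->CPUFreeWorkspace(ptr);
  else
    std::free(ptr);
}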
/*!
* Copyright (c) 2018 by Contributors
* \file dgl/runtime/threading_backend.h
* \brief Utilities for manipulating thread pool threads.
* @file dgl/runtime/threading_backend.h
* @brief Utilities for manipulating thread pool threads.
*/
#ifndef DGL_RUNTIME_THREADING_BACKEND_H_
#define DGL_RUNTIME_THREADING_BACKEND_H_
......@@ -15,7 +15,7 @@ namespace runtime {
namespace threading {
/*!
* \brief A platform-agnostic abstraction for managing a collection of
* @brief A platform-agnostic abstraction for managing a collection of
* thread pool threads.
*/
class ThreadGroup {
......@@ -23,13 +23,13 @@ class ThreadGroup {
class Impl;
/*!
* \brief Creates a collection of threads which run a provided function.
* @brief Creates a collection of threads which run a provided function.
*
* \param num_workers The total number of worker threads in this group.
* @param num_workers The total number of worker threads in this group.
Includes main thread if `exclude_worker0 = true`
* \param worker_callback A callback which is run in its own thread.
* @param worker_callback A callback which is run in its own thread.
Receives the worker_id as an argument.
* \param exclude_worker0 Whether to use the main thread as a worker.
* @param exclude_worker0 Whether to use the main thread as a worker.
* If `true`, worker0 will not be launched in a new thread and
* `worker_callback` will only be called for values >= 1. This
* allows use of the main thread as a worker.
......@@ -40,7 +40,7 @@ class ThreadGroup {
~ThreadGroup();
/*!
* \brief Blocks until all non-main threads in the pool finish.
* @brief Blocks until all non-main threads in the pool finish.
*/
void Join();
......@@ -50,16 +50,16 @@ class ThreadGroup {
};
/*!
* \brief Configure the CPU ID affinity
* @brief Configure the CPU ID affinity
*
* \param mode The preferred CPU type (1 = big, -1 = little).
* \param nthreads The number of threads to use (0 = use all).
* \param exclude_worker0 Whether to use the main thread as a worker.
* @param mode The preferred CPU type (1 = big, -1 = little).
* @param nthreads The number of threads to use (0 = use all).
* @param exclude_worker0 Whether to use the main thread as a worker.
* If `true`, worker0 will not be launched in a new thread and
* `worker_callback` will only be called for values >= 1. This
* allows use of the main thread as a worker.
*
* \return The number of workers to use.
* @return The number of workers to use.
*/
int Configure(AffinityMode mode, int nthreads, bool exclude_worker0);
......@@ -68,14 +68,14 @@ class ThreadGroup {
};
/*!
* \brief Platform-agnostic no-op.
* @brief Platform-agnostic no-op.
*/
// This used to be Yield(), renaming to YieldThread() because windows.h defined
// it as a macro in later SDKs.
void YieldThread();
/*!
* \return the maximum number of effective workers for this system.
* @return the maximum number of effective workers for this system.
*/
int MaxConcurrency();
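A usage sketch (editorial addition): the ThreadGroup constructor signature is
elided in this diff, so the argument order (num_workers, worker_callback,
exclude_worker0) is inferred from the parameter docs above.

inline void ExampleLaunch() {
  int num_workers = dgl::runtime::threading::MaxConcurrency();
  auto worker = [](int worker_id) {
    // per-thread work, keyed by worker_id
  };
  // Argument order inferred from the docs above (assumption).
  dgl::runtime::threading::ThreadGroup group(
      num_workers, worker, /*exclude_worker0=*/true);
  worker(0);     // with exclude_worker0, the main thread serves as worker 0
  group.Join();  // block until the non-main workers finish
}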
......
/*!
* Copyright (c) 2017 by Contributors
* \file dgl/runtime/util.h
* \brief Useful runtime util.
* @file dgl/runtime/util.h
* @brief Useful runtime util.
*/
#ifndef DGL_RUNTIME_UTIL_H_
#define DGL_RUNTIME_UTIL_H_
......@@ -12,11 +12,11 @@ namespace dgl {
namespace runtime {
/*!
* \brief Check whether type matches the given spec.
* \param t The type
* \param code The type code.
* \param bits The number of bits to be matched.
* \param lanes The number of lanes in the type.
* @brief Check whether type matches the given spec.
* @param t The type
* @param code The type code.
* @param bits The number of bits to be matched.
* @param lanes The number of lanes in the type.
*/
inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) {
return t.code == code && t.bits == bits && t.lanes == lanes;
......@@ -28,7 +28,7 @@ inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) {
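// Editorial example (not in the diff): checking for a scalar float32. The
// kDGLFloat type code and the DGLDataType field layout (code/bits/lanes) are
// assumed from DGL's C runtime API.
inline bool ExampleIsFloat32(DGLDataType t) {
  return dgl::runtime::TypeMatch(t, kDGLFloat, 32);  // lanes defaults to 1
}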
namespace dgl {
namespace ir {
namespace intrinsic {
/*! \brief The kind of structure field info used in intrinsic */
/*! @brief The kind of structure field info used in intrinsic */
enum DGLStructFieldKind : int {
// array head address
kArrAddr,
......
/*!
* Copyright (c) 2018 by Contributors
* \file dgl/sampler.h
* \brief DGL sampler header.
* @file dgl/sampler.h
* @brief DGL sampler header.
*/
#ifndef DGL_SAMPLER_H_
#define DGL_SAMPLER_H_
......@@ -21,17 +21,17 @@ class ImmutableGraph;
class SamplerOp {
public:
/*!
* \brief Sample a graph from the seed vertices with neighbor sampling.
* @brief Sample a graph from the seed vertices with neighbor sampling.
* The neighbors are sampled with a uniform distribution.
*
* \param graph the graph for sampling.
* \param seeds the nodes from which to start sampling.
* \param edge_type the type of edges along which neighbors are sampled.
* \param num_hops the number of hops to sample neighbors.
* \param expand_factor the max number of neighbors to sample.
* \param add_self_loop whether to add self-loops to the sampled subgraph.
* \param probability the transition probability (float/double).
* \return a NodeFlow graph.
* @param graph the graph for sampling.
* @param seeds the nodes from which to start sampling.
* @param edge_type the type of edges along which neighbors are sampled.
* @param num_hops the number of hops to sample neighbors.
* @param expand_factor the max number of neighbors to sample.
* @param add_self_loop whether to add self-loops to the sampled subgraph.
* @param probability the transition probability (float/double).
* @return a NodeFlow graph.
*/
template <typename ValueType>
static NodeFlow NeighborSample(
......@@ -40,14 +40,14 @@ class SamplerOp {
const bool add_self_loop, const ValueType *probability);
/*!
* \brief Sample a graph from the seed vertices with layer sampling.
* @brief Sample a graph from the seed vertices with layer sampling.
* The layers are sampled with a uniform distribution.
*
* \param graph the graph for sampling.
* \param seeds the nodes from which to start sampling.
* \param edge_type the type of edges along which neighbors are sampled.
* \param layer_sizes The size of each layer.
* \return a NodeFlow graph.
* @param graph the graph for sampling.
* @param seeds the nodes from which to start sampling.
* @param edge_type the type of edges along which neighbors are sampled.
* @param layer_sizes The size of each layer.
* @return a NodeFlow graph.
*/
static NodeFlow LayerUniformSample(
const ImmutableGraph *graph, const std::vector<dgl_id_t> &seeds,
......
/*!
* Copyright (c) 2020 by Contributors
* \file dgl/sampling/negative.h
* \brief Negative sampling.
* @file dgl/sampling/negative.h
* @brief Negative sampling.
*/
#ifndef DGL_SAMPLING_NEGATIVE_H_
#define DGL_SAMPLING_NEGATIVE_H_
......@@ -15,24 +15,24 @@ namespace dgl {
namespace sampling {
/*!
* \brief Given an edge type, uniformly sample source-destination pairs that do
* @brief Given an edge type, uniformly sample source-destination pairs that do
* not have an edge in between using rejection sampling.
*
* \note This function may not return the same number of elements as the given
* @note This function may not return the same number of elements as the given
* number of samples.
* \note This function requires sorting the CSR or CSC matrix of the graph
* @note This function requires sorting the CSR or CSC matrix of the graph
* in-place. It prefers CSC over CSR.
*
* \param hg The graph.
* \param etype The edge type.
* \param num_samples The number of negative examples to sample.
* \param num_trials The number of rejection sampling trials.
* \param exclude_self_loops Do not include the examples where the source equals
* @param hg The graph.
* @param etype The edge type.
* @param num_samples The number of negative examples to sample.
* @param num_trials The number of rejection sampling trials.
* @param exclude_self_loops Do not include the examples where the source equals
* the destination.
* \param replace Whether to sample with replacement.
* \param redundancy How many redundant negative examples to take in case of
* @param replace Whether to sample with replacement.
* @param redundancy How many redundant negative examples to take in case of
* duplicate examples.
* \return The pair of source and destination tensors.
* @return The pair of source and destination tensors.
*/
std::pair<IdArray, IdArray> GlobalUniformNegativeSampling(
HeteroGraphPtr hg, dgl_type_t etype, int64_t num_samples, int num_trials,
......
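A hedged call sketch (editorial): the declaration is truncated above, so the
trailing argument order (exclude_self_loops, replace, redundancy) and the
double type of redundancy are inferred from the parameter docs rather than
from the signature.

std::pair<IdArray, IdArray> ExampleNegatives(
    dgl::HeteroGraphPtr hg, dgl_type_t etype) {
  // Trailing argument order inferred from the docs above (assumption).
  return dgl::sampling::GlobalUniformNegativeSampling(
      hg, etype, /*num_samples=*/1000, /*num_trials=*/3,
      /*exclude_self_loops=*/true, /*replace=*/false, /*redundancy=*/1.0);
}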
/*!
* Copyright (c) 2020 by Contributors
* \file dgl/sampling/neighbor.h
* \brief Neighborhood-based sampling.
* @file dgl/sampling/neighbor.h
* @brief Neighborhood-based sampling.
*/
#ifndef DGL_SAMPLING_NEIGHBOR_H_
#define DGL_SAMPLING_NEIGHBOR_H_
......@@ -15,7 +15,7 @@ namespace dgl {
namespace sampling {
/*!
* \brief Sample from the neighbors of the given nodes and return the sampled
* @brief Sample from the neighbors of the given nodes and return the sampled
* edges as a graph.
*
* When sampling with replacement, the sampled subgraph could have parallel
......@@ -24,21 +24,21 @@ namespace sampling {
* For sampling without replacement, if fanout > the number of neighbors, all the
* neighbors will be sampled.
*
* \param hg The input graph.
* \param nodes Node IDs of each type. The vector length must be equal to the
* @param hg The input graph.
* @param nodes Node IDs of each type. The vector length must be equal to the
* number of node types. Empty array is allowed.
* \param fanouts Number of sampled neighbors for each edge type. The vector
* @param fanouts Number of sampled neighbors for each edge type. The vector
* length should be equal to the number of edge types, or one if they all have
* the same fanout.
* \param dir Edge direction.
* \param probability A vector of 1D float arrays, indicating the transition
* @param dir Edge direction.
* @param probability A vector of 1D float arrays, indicating the transition
* probability of each edge by edge type. An empty float array assumes uniform
* transition.
* \param exclude_edges Edges IDs of each type which will be excluded during
* @param exclude_edges Edges IDs of each type which will be excluded during
* sampling. The vector length must be equal to the number of edges types. Empty
* array is allowed.
* \param replace If true, sample with replacement.
* \return Sampled neighborhoods as a graph. The return graph has the same
* @param replace If true, sample with replacement.
* @return Sampled neighborhoods as a graph. The return graph has the same
* schema as the original one.
*/
HeteroSubgraph SampleNeighbors(
......@@ -53,17 +53,17 @@ HeteroSubgraph SampleNeighbors(
*
* If k > the number of neighbors, all the neighbors are sampled.
*
* \param hg The input graph.
* \param nodes Node IDs of each type. The vector length must be equal to the
* @param hg The input graph.
* @param nodes Node IDs of each type. The vector length must be equal to the
* number of node types. Empty array is allowed.
* \param k The k value for each edge type. The vector length should be equal to
* @param k The k value for each edge type. The vector length should be equal to
* the number of edge types, or one if they all have the same fanout.
* \param dir Edge direction.
* \param weight A vector of 1D float arrays, indicating the weights associated
* @param dir Edge direction.
* @param weight A vector of 1D float arrays, indicating the weights associated
* with each edge.
* \param ascending If true, elements are sorted in ascending order, equivalent
* @param ascending If true, elements are sorted in ascending order, equivalent
* to finding the K smallest values. Otherwise, find the K largest values.
* \return Sampled neighborhoods as a graph. The return graph has the same
* @return Sampled neighborhoods as a graph. The return graph has the same
* schema as the original one.
*/
HeteroSubgraph SampleNeighborsTopk(
......
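An editorial call sketch for SampleNeighbors on a graph with one node type and
one edge type. The declaration is elided above, so the argument order
(hg, nodes, fanouts, dir, probability, exclude_edges, replace), the int64_t
fanout element type, and the EdgeDir type of `dir` are all inferred from the
parameter docs.

dgl::HeteroSubgraph ExampleSample(
    dgl::HeteroGraphPtr hg, IdArray seed_nodes, dgl::EdgeDir dir) {
  std::vector<IdArray> nodes = {seed_nodes};      // one entry per node type
  std::vector<int64_t> fanouts = {10};            // one entry per edge type
  std::vector<FloatArray> prob = {FloatArray()};  // empty array = uniform
  std::vector<IdArray> exclude = {IdArray()};     // no excluded edges
  return dgl::sampling::SampleNeighbors(
      hg, nodes, fanouts, dir, prob, exclude, /*replace=*/false);
}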
/*!
* Copyright (c) 2019 by Contributors
* \file dgl/sampling/randomwalks.h
* \brief Random walk functions.
* @file dgl/sampling/randomwalks.h
* @brief Random walk functions.
*/
#ifndef DGL_SAMPLING_RANDOMWALKS_H_
#define DGL_SAMPLING_RANDOMWALKS_H_
......@@ -18,15 +18,15 @@ namespace dgl {
namespace sampling {
/*!
* \brief Metapath-based random walk.
* \param hg The heterograph.
* \param seeds A 1D array of seed nodes, with the type the source type of the
* @brief Metapath-based random walk.
* @param hg The heterograph.
* @param seeds A 1D array of seed nodes, with the type the source type of the
* first edge type in the metapath.
* \param metapath A 1D array of edge types representing the metapath.
* \param prob A vector of 1D float arrays, indicating the transition
* @param metapath A 1D array of edge types representing the metapath.
* @param prob A vector of 1D float arrays, indicating the transition
* probability of each edge by edge type. An empty float array assumes uniform
* transition.
* \return A tuple of
* @return A tuple of
* 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
* IDs. The paths that terminated early are padded with -1.
* 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
......@@ -38,16 +38,16 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalk(
const std::vector<FloatArray> &prob);
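// Editorial call sketch: an empty prob vector requests uniform transitions,
// per the docs above. Interpreting the third tuple element as per-step node
// types is an assumption; its documentation is elided here. std::tie needs
// <tuple>.
inline void ExampleWalk(
    dgl::HeteroGraphPtr hg, IdArray seeds, TypeArray metapath) {
  IdArray vids, eids;
  TypeArray step_types;
  std::tie(vids, eids, step_types) =
      dgl::sampling::RandomWalk(hg, seeds, metapath, {});
  // vids: (len(seeds), len(metapath) + 1) node IDs, -1-padded on early exit
  // eids: (len(seeds), len(metapath)) edge IDs
}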
/*!
* \brief Metapath-based random walk with restart probability.
* \param hg The heterograph.
* \param seeds A 1D array of seed nodes, with the type the source type of the
* @brief Metapath-based random walk with restart probability.
* @param hg The heterograph.
* @param seeds A 1D array of seed nodes, with the type the source type of the
* first edge type in the metapath.
* \param metapath A 1D array of edge types representing the metapath.
* \param prob A vector of 1D float arrays, indicating the transition
* @param metapath A 1D array of edge types representing the metapath.
* @param prob A vector of 1D float arrays, indicating the transition
* probability of each edge by edge type. An empty float array assumes uniform
* transition.
* \param restart_prob Restart probability.
* \return A tuple of
* @param restart_prob Restart probability.
* @return A tuple of
* 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
* IDs. The paths that terminated early are padded with -1.
* 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
......@@ -59,19 +59,19 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalkWithRestart(
const std::vector<FloatArray> &prob, double restart_prob);
/*!
* \brief Metapath-based random walk with stepwise restart probability. Useful
* @brief Metapath-based random walk with stepwise restart probability. Useful
* for PinSAGE-like models.
* \param hg The heterograph.
* \param seeds A 1D array of seed nodes, with the type the source type of the
* @param hg The heterograph.
* @param seeds A 1D array of seed nodes, with the type the source type of the
* first edge type in the metapath.
* \param metapath A 1D array of edge types representing the metapath.
* \param prob A vector of 1D float arrays, indicating the transition
* @param metapath A 1D array of edge types representing the metapath.
* @param prob A vector of 1D float arrays, indicating the transition
* probability of each edge by edge type. An empty float array assumes uniform
* transition.
* \param restart_prob Restart probability array which has the same number of
* @param restart_prob Restart probability array which has the same number of
* elements as \c metapath, indicating the probability to terminate after
* transition.
* \return A tuple of
* @return A tuple of
* 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
* IDs. The paths that terminated early are padded with -1.
* 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
......
/*!
* Copyright (c) 2018 by Contributors
* \file dgl/scheduler.h
* \brief Operations on graph index.
* @file dgl/scheduler.h
* @brief Operations on graph index.
*/
#ifndef DGL_SCHEDULER_H_
#define DGL_SCHEDULER_H_
......@@ -17,14 +17,14 @@ typedef dgl::runtime::NDArray IdArray;
namespace sched {
/*!
* \brief Generate degree bucketing schedule
* \tparam IdType Graph's index data type, which can be int32_t or int64_t
* \param msg_ids The edge id for each message
* \param vids The destination vertex for each message
* \param recv_ids The recv nodes (for checking zero degree nodes)
* \note If there are multiple messages going into the same destination vertex,
* @brief Generate degree bucketing schedule
* @tparam IdType Graph's index data type, which can be int32_t or int64_t
* @param msg_ids The edge id for each message
* @param vids The destination vertex for each message
* @param recv_ids The recv nodes (for checking zero degree nodes)
* @note If there are multiple messages going into the same destination vertex,
* then there will be multiple copies of the destination vertex in vids.
* \return a vector of 5 IdArrays for degree bucketing. The 5 arrays are:
* @return a vector of 5 IdArrays for degree bucketing. The 5 arrays are:
* degrees: degrees for each bucket
* nids: destination node ids
* nid_section: number of nodes in each bucket (used to split nids)
......@@ -36,16 +36,16 @@ std::vector<IdArray> DegreeBucketing(
const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids);
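// Editorial sketch (not in the diff): unpacking the documented buckets. The
// template argument follows the \tparam doc; the last two of the five
// returned arrays are elided above and therefore not named here.
inline void ExampleBuckets(
    const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids) {
  std::vector<IdArray> ret =
      dgl::sched::DegreeBucketing<int64_t>(msg_ids, vids, recv_ids);
  IdArray degrees = ret[0];      // degree of each bucket
  IdArray nids = ret[1];         // destination node ids
  IdArray nid_section = ret[2];  // bucket sizes, used to split nids
}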
/*!
* \brief Generate degree bucketing schedule for group_apply edge
* \tparam IdType Graph's index data type, which can be int32_t or int64_t
* \param uids One end vertex of each edge, by which the edges are grouped
* \param vids The other end vertex of each edge
* \param eids Edge ids
* \note This function always generates a group_apply schedule based on degrees of
* @brief Generate degree bucketing schedule for group_apply edge
* @tparam IdType Graph's index data type, which can be int32_t or int64_t
* @param uids One end vertex of each edge, by which the edges are grouped
* @param vids The other end vertex of each edge
* @param eids Edge ids
* @note This function always generates a group_apply schedule based on degrees of
* nodes in uids. Therefore, to group_apply by source nodes, uids
* should be the source vertices; to group_apply by destination nodes, uids
* should be the destination vertices.
* \return a vector of 5 IdArrays for degree bucketing. The 5 arrays are:
* @return a vector of 5 IdArrays for degree bucketing. The 5 arrays are:
* degrees: degrees for each bucket
* new_uids: uids reordered by degree bucket
* new_vids: vids reordered by degree bucket
......
/*!
* Copyright (c) 2019 by Contributors
* \file dgl/transform.h
* \brief DGL graph transformations
* @file dgl/transform.h
* @brief DGL graph transformations
*/
#ifndef DGL_TRANSFORM_H_
......@@ -19,17 +19,17 @@ namespace dgl {
namespace transform {
/*!
* \brief Given a list of graphs, remove the common nodes that have neither
* @brief Given a list of graphs, remove the common nodes that have neither
* inbound nor outbound edges.
*
* The graphs should have identical node ID space (i.e. should have the same set
* of nodes, including types and IDs).
*
* \param graphs The list of graphs.
* \param always_preserve The list of nodes to preserve regardless of whether
* @param graphs The list of graphs.
* @param always_preserve The list of nodes to preserve regardless of whether
* the inbound or outbound edges exist.
*
* \return A pair. The first element is the list of compacted graphs, and the
* @return A pair. The first element is the list of compacted graphs, and the
* second element is the mapping from the compacted graphs to the original
* graph.
*/
......@@ -38,7 +38,7 @@ std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>> CompactGraphs(
const std::vector<IdArray> &always_preserve);
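// Editorial sketch (not in the diff): compacting two graphs that share a node
// ID space. The first parameter is assumed to be the vector of input graphs;
// its declaration line is elided above. std::tie needs <tuple>.
inline void ExampleCompact(HeteroGraphPtr g1, HeteroGraphPtr g2) {
  std::vector<HeteroGraphPtr> compacted;
  std::vector<IdArray> induced_nodes;
  std::tie(compacted, induced_nodes) =
      CompactGraphs({g1, g2}, /*always_preserve=*/{});
  // compacted[i] pairs with the i-th input graph; induced_nodes maps the
  // compacted IDs back to the original ID space (direction per the docs).
}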
/*!
* \brief Convert a graph into a bipartite-structured graph for message passing.
* @brief Convert a graph into a bipartite-structured graph for message passing.
*
* Specifically, we create one node type \c ntype_l on the "left" side and
* another node type \c ntype_r on the "right" side for each node type \c ntype.
......@@ -65,17 +65,17 @@ std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>> CompactGraphs(
* output = x
* </code>
*
* \param graph The graph.
* \param rhs_nodes Designated nodes that would appear on the right side.
* \param include_rhs_in_lhs If false, do not include the nodes of node type \c
* @param graph The graph.
* @param rhs_nodes Designated nodes that would appear on the right side.
* @param include_rhs_in_lhs If false, do not include the nodes of node type \c
* ntype_r in \c ntype_l.
*
* \return A triplet containing
* @return A triplet containing
* * The bipartite-structured graph,
* * The induced node from the left side for each graph,
* * The induced edges.
*
* \note If include_rhs_in_lhs is true, then for each node type \c ntype, the
* @note If include_rhs_in_lhs is true, then for each node type \c ntype, the
* nodes in rhs_nodes[ntype] would always appear first in the nodes of type \c
* ntype_l in the new graph.
*/
......@@ -84,15 +84,15 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>> ToBlock(
bool include_rhs_in_lhs);
/*!
* \brief Convert a multigraph to a simple graph.
* @brief Convert a multigraph to a simple graph.
*
* \return A triplet of
* * \c hg : The resulting simple graph.
* * \c count : The array of edge occurrences per edge type.
* * \c edge_map : The mapping from original edge IDs to new edge IDs per edge
* @return A triplet of
* * @c hg : The resulting simple graph.
* * @c count : The array of edge occurrences per edge type.
* * @c edge_map : The mapping from original edge IDs to new edge IDs per edge
* type.
*
* \note Example: consider a graph with the following edges
* @note Example: consider a graph with the following edges
*
* [(0, 1), (1, 3), (2, 2), (1, 3), (1, 4), (1, 4)]
*
......@@ -117,12 +117,12 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>>
ToSimpleGraph(const HeteroGraphPtr graph);
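// Editorial sketch (not in the diff): unpacking the triplet returned by
// ToSimpleGraph, with the element meanings taken from the docs above.
inline void ExampleToSimple(HeteroGraphPtr g) {
  HeteroGraphPtr simple;
  std::vector<IdArray> counts, edge_maps;
  std::tie(simple, counts, edge_maps) = ToSimpleGraph(g);
  // counts[etype] holds the multiplicity of each surviving edge;
  // edge_maps[etype] maps every original edge ID to its new edge ID.
}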
/*!
* \brief Remove edges from a graph.
* @brief Remove edges from a graph.
*
* \param graph The graph.
* \param eids The edge IDs to remove per edge type.
* @param graph The graph.
* @param eids The edge IDs to remove per edge type.
*
* \return A pair of the graph with edges removed, as well as the edge ID
* @return A pair of the graph with edges removed, as well as the edge ID
* mapping from the original graph to the new graph per edge type.
*/
std::pair<HeteroGraphPtr, std::vector<IdArray>> RemoveEdges(
......
/*!
* Copyright (c) 2020 by Contributors
* \file rpc/shared_mem_serializer.h
* \brief Headers for the serializer.
* @file rpc/shared_mem_serializer.h
* @brief Headers for the serializer.
*/
#ifndef DGL_ZEROCOPY_SERIALIZER_H_
#define DGL_ZEROCOPY_SERIALIZER_H_
......@@ -63,10 +63,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
};
/*!
* \brief This constructor is for the writing scenario or for reading from the
* @brief This constructor is for the writing scenario or for reading from the
* local machine.
* \param strm The backup stream to write/load from
* \param send_to_remote Whether this stream will be deserialized at remote
* @param strm The backup stream to write/load from
* @param send_to_remote Whether this stream will be deserialized at remote
* machine or the local machine. If true, will record the data pointer into
* buffer list.
*/
......@@ -75,9 +75,9 @@ class StreamWithBuffer : public dmlc::SeekStream {
buffer_list_(),
send_to_remote_(send_to_remote) {}
/*!
* \brief This constructor is for reading from remote
* \param strm The stream to write/load from zerocopy write/load
* \param data_ptr_list list of pointers to reconstruct NDArrays from
* @brief This constructor is for reading from remote
* @param strm The stream to write/load from zerocopy write/load
* @param data_ptr_list list of pointers to reconstruct NDArrays from
*
* For example:
* std::string blob;
......@@ -95,9 +95,9 @@ class StreamWithBuffer : public dmlc::SeekStream {
}
/*!
* \brief Construct stream backed up by a string
* \param blob The string to write/load from zerocopy write/load
* \param send_to_remote Whether this stream will be deserialized at remote
* @brief Construct stream backed up by a string
* @param blob The string to write/load from zerocopy write/load
* @param send_to_remote Whether this stream will be deserialized at remote
* machine or the local machine. If true, will record the data pointer into
* buffer list.
*/
......@@ -106,10 +106,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
send_to_remote_(send_to_remote) {}
/*!
* \brief Construct stream backed up by a buffer
* \param p_buffer buffer pointer
* \param size buffer size
* \param send_to_remote Whether this stream will be deserialized at remote
* @brief Construct stream backed up by a buffer
* @param p_buffer buffer pointer
* @param size buffer size
* @param send_to_remote Whether this stream will be deserialized at remote
* machine or the local machine. If true, will record the data pointer into
* buffer list.
*/
......@@ -118,10 +118,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
send_to_remote_(send_to_remote) {}
/*!
* \brief Construct stream backed up by a string, and reconstruct NDArray
* @brief Construct stream backed up by a string, and reconstruct NDArray
* from data_ptr_list
* \param blob The string to write/load from zerocopy write/load
* \param data_ptr_list pointer list for NDArrays to reconstruct from
* @param blob The string to write/load from zerocopy write/load
* @param data_ptr_list pointer list for NDArrays to reconstruct from
*/
StreamWithBuffer(std::string* blob, const std::vector<void*>& data_ptr_list)
: strm_(new dmlc::MemoryStringStream(blob)), send_to_remote_(true) {
......@@ -131,11 +131,11 @@ class StreamWithBuffer : public dmlc::SeekStream {
}
/*!
* \brief Construct stream backed up by a buffer, and reconstruct NDArray
* @brief Construct stream backed up by a buffer, and reconstruct NDArray
* from data_ptr_list
* \param p_buffer buffer pointer
* \param size buffer size
* \param data_ptr_list pointer list for NDArrays to reconstruct from
* @param p_buffer buffer pointer
* @param size buffer size
* @param data_ptr_list pointer list for NDArrays to reconstruct from
*/
StreamWithBuffer(
char* p_buffer, size_t size, const std::vector<void*>& data_ptr_list)
......@@ -156,14 +156,14 @@ class StreamWithBuffer : public dmlc::SeekStream {
using dmlc::Stream::Write;
/*!
* \brief push NDArray into stream
* @brief push NDArray into stream
* If send_to_remote=true, the NDArray will be saved to the buffer list
* If send_to_remote=false, the NDArray will be saved to the backing string
*/
void PushNDArray(const runtime::NDArray& tensor);
/*!
* \brief pop NDArray from stream
* @brief pop NDArray from stream
* If send_to_remote=true, the NDArray will be reconstructed from buffer list
* If send_to_remote=false, the NDArray will be reconstructed from shared
* memory
......@@ -171,12 +171,12 @@ class StreamWithBuffer : public dmlc::SeekStream {
dgl::runtime::NDArray PopNDArray();
/*!
* \brief Get whether this stream is for remote usage
* @brief Get whether this stream is for remote usage
*/
bool send_to_remote() { return send_to_remote_; }
/*!
* \brief Get underlying buffer list
* @brief Get underlying buffer list
*/
const std::deque<Buffer>& buffer_list() const { return buffer_list_; }
......
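A hedged round-trip sketch (editorial addition), pieced together from the
constructor and PushNDArray/PopNDArray docs above. The writing-side constructor
signature and the dgl:: namespace qualification are taken from the parameter
docs; the mechanism for shipping the recorded buffer pointers to the receiving
side is elided in this diff and is simply taken as given here.

void ExampleRoundTrip(const dgl::runtime::NDArray& tensor,
                      const std::vector<void*>& received_ptrs) {
  // Writing side: with send_to_remote = true, the tensor's data pointer is
  // recorded in the buffer list instead of being copied into the blob.
  std::string blob;
  dgl::StreamWithBuffer writer(&blob, /*send_to_remote=*/true);
  writer.PushNDArray(tensor);

  // Reading side: reconstruct the NDArray from the blob plus the pointers
  // that traveled out of band.
  dgl::StreamWithBuffer reader(&blob, received_ptrs);
  dgl::runtime::NDArray restored = reader.PopNDArray();
  (void)restored;
}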
/*!
* Copyright (c) 2019 by Contributors
* \file intel/cpu_support.h
* \brief Intel CPU support
* \author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
* @file intel/cpu_support.h
* @brief Intel CPU support
* @author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
*/
#ifndef INTEL_CPU_SUPPORT_H_
#define INTEL_CPU_SUPPORT_H_
......@@ -54,8 +54,8 @@ struct IntelKernel {
};
/*!
* \brief Element-wise addition kernel using Intel AVX512 instructions.
* \note It uses AVX512.
* @brief Element-wise addition kernel using Intel AVX512 instructions.
* @note It uses AVX512.
*/
template <class Op>
class ElemWiseAddUpdate : public Xbyak::CodeGenerator {
......
/*!
* Copyright (c) 2019 by Contributors
* \file intel/meta_utils.h
* \brief Meta programming utils
* \author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
* @file intel/meta_utils.h
* @brief Meta programming utils
* @author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
*/
#ifndef INTEL_META_UTILS_H_
#define INTEL_META_UTILS_H_
......
/*!
* Copyright (c) 2019 by Contributors
* \file api/api_container.cc
* \brief Runtime container APIs. (reference: tvm/src/api/api_lang.cc)
* @file api/api_container.cc
* @brief Runtime container APIs. (reference: tvm/src/api/api_lang.cc)
*/
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
......
/*!
* Copyright (c) 2022 by Contributors
* \file api/api_test.cc
* \brief C APIs for testing FFI
* @file api/api_test.cc
* @brief C APIs for testing FFI
*/
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/arith.h
* \brief Arithmetic functors
* @file array/arith.h
* @brief Arithmetic functors
*/
#ifndef DGL_ARRAY_ARITH_H_
#define DGL_ARRAY_ARITH_H_
......
/*!
* Copyright (c) 2019-2021 by Contributors
* \file array/array.cc
* \brief DGL array utilities implementation
* @file array/array.cc
* @brief DGL array utilities implementation
*/
#include <dgl/array.h>
#include <dgl/graph_traversal.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/array_arith.cc
* \brief DGL array arithmetic operations
* @file array/array_arith.cc
* @brief DGL array arithmetic operations
*/
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/array_op.h
* \brief Array operator templates
* @file array/array_op.h
* @brief Array operator templates
*/
#ifndef DGL_ARRAY_ARRAY_OP_H_
#define DGL_ARRAY_ARRAY_OP_H_
......
/*!
* Copyright (c) 2019 by Contributors
* \file array/check.h
* \brief DGL check utilities
* @file array/check.h
* @brief DGL check utilities
*/
#ifndef DGL_ARRAY_CHECK_H_
#define DGL_ARRAY_CHECK_H_
......
/*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/array_cumsum.cc
* \brief Array cumsum CPU implementation
* @file array/cpu/array_cumsum.cc
* @brief Array cumsum CPU implementation
*/
#include <dgl/array.h>
......