Unverified Commit 619d735d authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*!
 * Copyright (c) 2020-2022 by Contributors
 * @file array/tensordispatch.h
 * @brief This file defines the dispatcher of tensor operators to
 * framework-specific implementations.
 *
 * The dispatcher consists of a TensorDispatcher singleton in DGL C library and
@@ -39,7 +39,7 @@
#include "ndarray.h"
/*!
 * @brief Casts a pointer \c entry to a function pointer with signature of \c
 * func.
 */
#define FUNCCAST(func, entry) (*reinterpret_cast<decltype(&(func))>(entry))
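For orientation, here is a minimal sketch of how this macro is meant to be used; the `CPURawAlloc` declaration and its signature are assumptions for illustration, not part of this header:

```cpp
#include <cstddef>

void* CPURawAlloc(size_t nbytes);  // assumed adapter entrypoint signature

// `entry` is a void* looked up from the adapter library; FUNCCAST casts it
// to the type of the declared function before calling it.
void* CallThroughEntry(void* entry, size_t nbytes) {
  // Expands to (*reinterpret_cast<decltype(&CPURawAlloc)>(entry))(nbytes).
  return FUNCCAST(CPURawAlloc, entry)(nbytes);
}
```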
@@ -48,31 +48,31 @@ namespace dgl {
namespace runtime {
/*!
 * @brief Dispatcher that delegates the function calls to framework-specific C++
 * APIs.
 *
 * This class is not thread-safe.
 */
class TensorDispatcher {
 public:
  /*! @brief Get the singleton instance. */
  static TensorDispatcher* Global() {
    static TensorDispatcher inst;
    return &inst;
  }
  /*! @brief Whether an adapter library is available. */
  inline bool IsAvailable() { return available_; }
  /*! @brief Load symbols from the given tensor adapter library path. */
  bool Load(const char* path_cstr);
  /*!
   * @brief Allocate a piece of CPU memory via PyTorch's CPUAllocator.
   * Used in CPUDeviceAPI::AllocWorkspace().
   *
   * @param nbytes The size to be allocated.
   * @return Pointer to the allocated memory.
   */
  inline void* CPUAllocWorkspace(size_t nbytes) {
    auto entry = entrypoints_[Op::kCPURawAlloc];
@@ -80,10 +80,10 @@ class TensorDispatcher {
  }
  /*!
   * @brief Free the CPU memory.
   * Used in CPUDeviceAPI::FreeWorkspace().
   *
   * @param ptr Pointer to the memory to be freed.
   */
  inline void CPUFreeWorkspace(void* ptr) {
    auto entry = entrypoints_[Op::kCPURawDelete];
@@ -92,17 +92,17 @@ class TensorDispatcher {
#ifdef DGL_USE_CUDA
  /*!
   * @brief Allocate a piece of GPU memory via
   * PyTorch's THCCachingAllocator.
   * Used in CUDADeviceAPI::AllocWorkspace().
   *
   * @note THCCachingAllocator specifies the device to allocate on
   * via cudaGetDevice(). Make sure to call cudaSetDevice()
   * before invoking this function.
   *
   * @param nbytes The size to be allocated.
   * @param stream The stream to be allocated on.
   * @return Pointer to the allocated memory.
   */
  inline void* CUDAAllocWorkspace(size_t nbytes, cudaStream_t stream) {
    auto entry = entrypoints_[Op::kCUDARawAlloc];
@@ -110,10 +110,10 @@ class TensorDispatcher {
  }
  /*!
   * @brief Free the GPU memory.
   * Used in CUDADeviceAPI::FreeWorkspace().
   *
   * @param ptr Pointer to the memory to be freed.
   */
  inline void CUDAFreeWorkspace(void* ptr) {
    auto entry = entrypoints_[Op::kCUDARawDelete];
@@ -121,14 +121,14 @@ class TensorDispatcher {
  }
  /*!
   * @brief Find the current PyTorch CUDA stream.
   * Used in runtime::getCurrentCUDAStream().
   *
   * @note PyTorch pre-allocates/sets the current CUDA stream
   * on the current device via cudaGetDevice(). Make sure to call
   * cudaSetDevice() before invoking this function.
   *
   * @return cudaStream_t stream handle.
   */
  inline cudaStream_t CUDAGetCurrentStream() {
    auto entry = entrypoints_[Op::kCUDACurrentStream];
@@ -137,12 +137,12 @@ class TensorDispatcher {
#endif  // DGL_USE_CUDA
  /*!
   * @brief Record streams that are using this tensor.
   * Used in NDArray::RecordStream().
   *
   * @param ptr Pointer of the tensor to be recorded.
   * @param stream The stream that is using this tensor.
   * @param device_id Device of the tensor.
   */
  inline void RecordStream(void* ptr, DGLStreamHandle stream, int device_id) {
#ifdef DGL_USE_CUDA
@@ -153,13 +153,13 @@ class TensorDispatcher {
  }
 private:
  /*! @brief ctor */
  TensorDispatcher() = default;
  /*! @brief dtor */
  ~TensorDispatcher();
  /*!
   * @brief List of symbols in the adapter library.
   *
   * Must match the functions in tensoradapter/include/tensoradapter.h.
   */
@@ -170,7 +170,7 @@ class TensorDispatcher {
#endif  // DGL_USE_CUDA
  };
  /*! @brief Index of each function to the symbol list */
  class Op {
   public:
    static constexpr int kCPURawAlloc = 0;
@@ -183,10 +183,10 @@ class TensorDispatcher {
#endif  // DGL_USE_CUDA
  };
  /*! @brief Number of functions */
  static constexpr int num_entries_ = sizeof(names_) / sizeof(names_[0]);
  /*! @brief Entrypoints of each function */
  void* entrypoints_[num_entries_] = {
      nullptr, nullptr,
#ifdef DGL_USE_CUDA
...
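To make the dispatch path concrete, a hedged usage sketch; the malloc fallback is an assumption about what a caller might do when no adapter is loaded, not code from this header:

```cpp
#include <cstdlib>

// Allocate a CPU workspace through the dispatcher when an adapter library
// (e.g. PyTorch's) is loaded; otherwise fall back to plain malloc.
void* AllocWorkspaceSketch(size_t nbytes) {
  auto* td = dgl::runtime::TensorDispatcher::Global();
  if (td->IsAvailable())
    return td->CPUAllocWorkspace(nbytes);  // delegates to PyTorch's CPUAllocator
  return std::malloc(nbytes);              // assumed fallback path
}
```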
/*!
 * Copyright (c) 2018 by Contributors
 * @file dgl/runtime/threading_backend.h
 * @brief Utilities for manipulating thread pool threads.
 */
#ifndef DGL_RUNTIME_THREADING_BACKEND_H_
#define DGL_RUNTIME_THREADING_BACKEND_H_
@@ -15,7 +15,7 @@ namespace runtime {
namespace threading {
/*!
 * @brief A platform-agnostic abstraction for managing a collection of
 * thread pool threads.
 */
class ThreadGroup {
@@ -23,13 +23,13 @@ class ThreadGroup {
  class Impl;
  /*!
   * @brief Creates a collection of threads which run a provided function.
   *
   * @param num_workers The total number of worker threads in this group.
   * Includes the main thread if `exclude_worker0 = true`.
   * @param worker_callback A callback which is run in its own thread.
   * Receives the worker_id as an argument.
   * @param exclude_worker0 Whether to use the main thread as a worker.
   * If `true`, worker0 will not be launched in a new thread and
   * `worker_callback` will only be called for values >= 1. This
   * allows use of the main thread as a worker.
@@ -40,7 +40,7 @@
  ~ThreadGroup();
  /*!
   * @brief Blocks until all non-main threads in the pool finish.
   */
  void Join();
@@ -50,16 +50,16 @@
  };
  /*!
   * @brief Configure the CPU ID affinity.
   *
   * @param mode The preferred CPU type (1 = big, -1 = little).
   * @param nthreads The number of threads to use (0 = use all).
   * @param exclude_worker0 Whether to use the main thread as a worker.
   * If `true`, worker0 will not be launched in a new thread and
   * `worker_callback` will only be called for values >= 1. This
   * allows use of the main thread as a worker.
   *
   * @return The number of workers to use.
   */
  int Configure(AffinityMode mode, int nthreads, bool exclude_worker0);
@@ -68,14 +68,14 @@
};
/*!
 * @brief Platform-agnostic no-op.
 */
// This used to be Yield(), renaming to YieldThread() because windows.h defined
// it as a macro in later SDKs.
void YieldThread();
/*!
 * @return the maximum number of effective workers for this system.
 */
int MaxConcurrency();
...
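A hedged sketch of driving this API end to end; the ThreadGroup constructor's exact signature is an assumption pieced together from the parameter docs above:

```cpp
#include <iostream>

void RunPoolSketch() {
  using namespace dgl::runtime::threading;
  int nthreads = MaxConcurrency();
  // Constructor arguments assumed from the docs: worker count, a per-worker
  // callback receiving the worker_id, and the exclude_worker0 flag.
  ThreadGroup group(
      nthreads, [](int worker_id) { std::cout << worker_id << "\n"; },
      /*exclude_worker0=*/false);
  group.Join();  // blocks until all non-main threads finish
}
```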
/*!
 * Copyright (c) 2017 by Contributors
 * @file dgl/runtime/util.h
 * @brief Useful runtime util.
 */
#ifndef DGL_RUNTIME_UTIL_H_
#define DGL_RUNTIME_UTIL_H_
@@ -12,11 +12,11 @@ namespace dgl {
namespace runtime {
/*!
 * @brief Check whether type matches the given spec.
 * @param t The type.
 * @param code The type code.
 * @param bits The number of bits to be matched.
 * @param lanes The number of lanes in the type.
 */
inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) {
  return t.code == code && t.bits == bits && t.lanes == lanes;
@@ -28,7 +28,7 @@ inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) {
namespace dgl {
namespace ir {
namespace intrinsic {
/*! @brief The kind of structure field info used in intrinsic */
enum DGLStructFieldKind : int {
  // array head address
  kArrAddr,
...
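As a quick illustration (the `kDGLInt` type-code constant is an assumption about DGL's data type codes, not defined in this excerpt):

```cpp
// Check that a tensor's dtype is a plain 32-bit integer; lanes defaults to 1.
bool IsInt32(DGLDataType t) {
  return dgl::runtime::TypeMatch(t, kDGLInt, 32);
}
```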
/*!
 * Copyright (c) 2018 by Contributors
 * @file dgl/sampler.h
 * @brief DGL sampler header.
 */
#ifndef DGL_SAMPLER_H_
#define DGL_SAMPLER_H_
@@ -21,17 +21,17 @@ class ImmutableGraph;
class SamplerOp {
 public:
  /*!
   * @brief Sample a graph from the seed vertices with neighbor sampling.
   * The neighbors are sampled with a uniform distribution.
   *
   * @param graph The graph for sampling.
   * @param seeds The nodes where we should start to sample.
   * @param edge_type The type of edges along which to sample neighbors.
   * @param num_hops The number of hops to sample neighbors.
   * @param expand_factor The max number of neighbors to sample.
   * @param add_self_loop Whether to add self loops to the sampled subgraph.
   * @param probability The transition probability (float/double).
   * @return A NodeFlow graph.
   */
  template <typename ValueType>
  static NodeFlow NeighborSample(
@@ -40,14 +40,14 @@ class SamplerOp {
      const bool add_self_loop, const ValueType *probability);
  /*!
   * @brief Sample a graph from the seed vertices with layer sampling.
   * The layers are sampled with a uniform distribution.
   *
   * @param graph The graph for sampling.
   * @param seeds The nodes where we should start to sample.
   * @param edge_type The type of edges along which to sample neighbors.
   * @param layer_sizes The size of each layer.
   * @return A NodeFlow graph.
   */
  static NodeFlow LayerUniformSample(
      const ImmutableGraph *graph, const std::vector<dgl_id_t> &seeds,
...
/*!
 * Copyright (c) 2020 by Contributors
 * @file dgl/sampling/negative.h
 * @brief Negative sampling.
 */
#ifndef DGL_SAMPLING_NEGATIVE_H_
#define DGL_SAMPLING_NEGATIVE_H_
@@ -15,24 +15,24 @@ namespace dgl {
namespace sampling {
/*!
 * @brief Given an edge type, uniformly sample source-destination pairs that do
 * not have an edge in between using rejection sampling.
 *
 * @note This function may not return the same number of elements as the given
 * number of samples.
 * @note This function requires sorting the CSR or CSC matrix of the graph
 * in-place. It prefers CSC over CSR.
 *
 * @param hg The graph.
 * @param etype The edge type.
 * @param num_samples The number of negative examples to sample.
 * @param num_trials The number of rejection sampling trials.
 * @param exclude_self_loops Do not include examples where the source equals
 * the destination.
 * @param replace Whether to sample with replacement.
 * @param redundancy How many redundant negative examples to take in case of
 * duplicate examples.
 * @return The pair of source and destination tensors.
 */
std::pair<IdArray, IdArray> GlobalUniformNegativeSampling(
    HeteroGraphPtr hg, dgl_type_t etype, int64_t num_samples, int num_trials,
...
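A hedged call sketch; the visible declaration cuts off after `num_trials`, so the trailing arguments here follow the parameter docs above and are assumptions about the exact order and types:

```cpp
// Sample up to 1000 negative (src, dst) pairs for one edge type.
std::pair<IdArray, IdArray> SampleNegativesSketch(HeteroGraphPtr hg,
                                                  dgl_type_t etype) {
  return dgl::sampling::GlobalUniformNegativeSampling(
      hg, etype, /*num_samples=*/1000, /*num_trials=*/10,
      /*exclude_self_loops=*/true, /*replace=*/false, /*redundancy=*/1.0);
}
```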
/*!
 * Copyright (c) 2020 by Contributors
 * @file dgl/sampling/neighbor.h
 * @brief Neighborhood-based sampling.
 */
#ifndef DGL_SAMPLING_NEIGHBOR_H_
#define DGL_SAMPLING_NEIGHBOR_H_
@@ -15,7 +15,7 @@ namespace dgl {
namespace sampling {
/*!
 * @brief Sample from the neighbors of the given nodes and return the sampled
 * edges as a graph.
 *
 * When sampling with replacement, the sampled subgraph could have parallel
@@ -24,21 +24,21 @@ namespace sampling {
 * For sampling without replacement, if fanout > the number of neighbors, all
 * the neighbors will be sampled.
 *
 * @param hg The input graph.
 * @param nodes Node IDs of each type. The vector length must be equal to the
 * number of node types. Empty array is allowed.
 * @param fanouts Number of sampled neighbors for each edge type. The vector
 * length should be equal to the number of edge types, or one if they all have
 * the same fanout.
 * @param dir Edge direction.
 * @param probability A vector of 1D float arrays, indicating the transition
 * probability of each edge by edge type. An empty float array assumes uniform
 * transition.
 * @param exclude_edges Edge IDs of each type which will be excluded during
 * sampling. The vector length must be equal to the number of edge types. Empty
 * array is allowed.
 * @param replace If true, sample with replacement.
 * @return Sampled neighborhoods as a graph. The returned graph has the same
 * schema as the original one.
 */
HeteroSubgraph SampleNeighbors(
@@ -53,17 +53,17 @@ HeteroSubgraph SampleNeighbors(
 *
 * If k > the number of neighbors, all the neighbors are sampled.
 *
 * @param hg The input graph.
 * @param nodes Node IDs of each type. The vector length must be equal to the
 * number of node types. Empty array is allowed.
 * @param k The k value for each edge type. The vector length should be equal to
 * the number of edge types, or one if they all have the same fanout.
 * @param dir Edge direction.
 * @param weight A vector of 1D float arrays, indicating the weights associated
 * with each edge.
 * @param ascending If true, elements are sorted in ascending order, equivalent
 * to finding the k smallest values. Otherwise, find the k largest values.
 * @return Sampled neighborhoods as a graph. The returned graph has the same
 * schema as the original one.
 */
HeteroSubgraph SampleNeighborsTopk(
...
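A hedged call sketch for a homogeneous graph (one node type, one edge type); the full declaration is elided above, so the argument order and the `EdgeDir::kIn` spelling follow the parameter docs and are assumptions:

```cpp
// Sample up to 2 inbound neighbors per seed, uniformly, without replacement.
HeteroSubgraph SampleTwoInNeighborsSketch(HeteroGraphPtr hg, IdArray seeds) {
  return dgl::sampling::SampleNeighbors(
      hg, {seeds}, /*fanouts=*/{2}, EdgeDir::kIn,
      /*probability=*/{FloatArray()},  // empty array => uniform transition
      /*exclude_edges=*/{IdArray()}, /*replace=*/false);
}
```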
/*!
 * Copyright (c) 2019 by Contributors
 * @file dgl/sampling/randomwalks.h
 * @brief Random walk functions.
 */
#ifndef DGL_SAMPLING_RANDOMWALKS_H_
#define DGL_SAMPLING_RANDOMWALKS_H_
@@ -18,15 +18,15 @@ namespace dgl {
namespace sampling {
/*!
 * @brief Metapath-based random walk.
 * @param hg The heterograph.
 * @param seeds A 1D array of seed nodes, with the node type being the source
 * type of the first edge type in the metapath.
 * @param metapath A 1D array of edge types representing the metapath.
 * @param prob A vector of 1D float arrays, indicating the transition
 * probability of each edge by edge type. An empty float array assumes uniform
 * transition.
 * @return A pair of
 * 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
 * IDs. The paths that terminated early are padded with -1.
 * 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
@@ -38,16 +38,16 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalk(
    const std::vector<FloatArray> &prob);
/*!
 * @brief Metapath-based random walk with restart probability.
 * @param hg The heterograph.
 * @param seeds A 1D array of seed nodes, with the node type being the source
 * type of the first edge type in the metapath.
 * @param metapath A 1D array of edge types representing the metapath.
 * @param prob A vector of 1D float arrays, indicating the transition
 * probability of each edge by edge type. An empty float array assumes uniform
 * transition.
 * @param restart_prob Restart probability.
 * @return A pair of
 * 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
 * IDs. The paths that terminated early are padded with -1.
 * 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
@@ -59,19 +59,19 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalkWithRestart(
    const std::vector<FloatArray> &prob, double restart_prob);
/*!
 * @brief Metapath-based random walk with stepwise restart probability. Useful
 * for PinSAGE-like models.
 * @param hg The heterograph.
 * @param seeds A 1D array of seed nodes, with the node type being the source
 * type of the first edge type in the metapath.
 * @param metapath A 1D array of edge types representing the metapath.
 * @param prob A vector of 1D float arrays, indicating the transition
 * probability of each edge by edge type. An empty float array assumes uniform
 * transition.
 * @param restart_prob Restart probability array which has the same number of
 * elements as \c metapath, indicating the probability to terminate after
 * transition.
 * @return A pair of
 * 1. One 2D array of shape (len(seeds), len(metapath) + 1) with node
 * IDs. The paths that terminated early are padded with -1.
 * 2. One 2D array of shape (len(seeds), len(metapath)) with edge IDs.
...
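A hedged sketch of invoking the plain variant; uniform transitions are requested with empty probability arrays, per the docs above:

```cpp
// Run a metapath-based random walk with uniform transition probabilities.
std::tuple<IdArray, IdArray, TypeArray> WalkSketch(
    HeteroGraphPtr hg, IdArray seeds, TypeArray metapath, int num_etypes) {
  std::vector<FloatArray> prob(num_etypes);  // empty arrays => uniform
  return dgl::sampling::RandomWalk(hg, seeds, metapath, prob);
}
```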
/*!
 * Copyright (c) 2018 by Contributors
 * @file dgl/scheduler.h
 * @brief Operations on graph index.
 */
#ifndef DGL_SCHEDULER_H_
#define DGL_SCHEDULER_H_
@@ -17,14 +17,14 @@ typedef dgl::runtime::NDArray IdArray;
namespace sched {
/*!
 * @brief Generate degree bucketing schedule.
 * @tparam IdType Graph's index data type, can be int32_t or int64_t.
 * @param msg_ids The edge id for each message.
 * @param vids The destination vertex for each message.
 * @param recv_ids The recv nodes (for checking zero degree nodes).
 * @note If there are multiple messages going into the same destination vertex,
 * then there will be multiple copies of the destination vertex in vids.
 * @return A vector of 5 IdArrays for degree bucketing. The 5 arrays are:
 * degrees: degrees for each bucket
 * nids: destination node ids
 * nid_section: number of nodes in each bucket (used to split nids)
@@ -36,16 +36,16 @@ std::vector<IdArray> DegreeBucketing(
    const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids);
/*!
 * @brief Generate degree bucketing schedule for group_apply edge.
 * @tparam IdType Graph's index data type, can be int32_t or int64_t.
 * @param uids One end vertex of edge by which edges are grouped.
 * @param vids The other end vertex of edge.
 * @param eids Edge ids.
 * @note This function always generates a group_apply schedule based on degrees
 * of nodes in uids. Therefore, if group_apply by source nodes, then uids
 * should be source. If group_apply by destination nodes, then uids
 * should be destination.
 * @return A vector of 5 IdArrays for degree bucketing. The 5 arrays are:
 * degrees: degrees for each bucket
 * new_uids: uids reordered by degree bucket
 * new_vids: vids reordered by degree bucket
...
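A hedged call sketch; whether the template parameter must be spelled explicitly is an assumption based on the `@tparam` doc above:

```cpp
// Build the degree-bucketing schedule for 64-bit graph indices.
std::vector<IdArray> BucketingSketch(
    const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids) {
  return dgl::sched::DegreeBucketing<int64_t>(msg_ids, vids, recv_ids);
}
```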
/*!
 * Copyright (c) 2019 by Contributors
 * @file dgl/transform.h
 * @brief DGL graph transformations.
 */
#ifndef DGL_TRANSFORM_H_
@@ -19,17 +19,17 @@ namespace dgl {
namespace transform {
/*!
 * @brief Given a list of graphs, remove the common nodes that do not have
 * inbound and outbound edges.
 *
 * The graphs should have identical node ID space (i.e. should have the same set
 * of nodes, including types and IDs).
 *
 * @param graphs The list of graphs.
 * @param always_preserve The list of nodes to preserve regardless of whether
 * the inbound or outbound edges exist.
 *
 * @return A pair. The first element is the list of compacted graphs, and the
 * second element is the mapping from the compacted graphs to the original
 * graph.
 */
@@ -38,7 +38,7 @@ std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>> CompactGraphs(
    const std::vector<IdArray> &always_preserve);
/*!
 * @brief Convert a graph into a bipartite-structured graph for message passing.
 *
 * Specifically, we create one node type \c ntype_l on the "left" side and
 * another node type \c ntype_r on the "right" side for each node type \c ntype.
@@ -65,17 +65,17 @@ std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>> CompactGraphs(
 * output = x
 * </code>
 *
 * @param graph The graph.
 * @param rhs_nodes Designated nodes that would appear on the right side.
 * @param include_rhs_in_lhs If false, do not include the nodes of node type \c
 * ntype_r in \c ntype_l.
 *
 * @return A triplet containing
 * * The bipartite-structured graph,
 * * The induced nodes from the left side for each graph,
 * * The induced edges.
 *
 * @note If include_rhs_in_lhs is true, then for each node type \c ntype, the
 * nodes in rhs_nodes[ntype] would always appear first in the nodes of type \c
 * ntype_l in the new graph.
 */
@@ -84,15 +84,15 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>> ToBlock(
    bool include_rhs_in_lhs);
/*!
 * @brief Convert a multigraph to a simple graph.
 *
 * @return A triplet of
 * * @c hg : The resulting simple graph.
 * * @c count : The array of edge occurrences per edge type.
 * * @c edge_map : The mapping from original edge IDs to new edge IDs per edge
 * type.
 *
 * @note Example: consider a graph with the following edges
 *
 * [(0, 1), (1, 3), (2, 2), (1, 3), (1, 4), (1, 4)]
 *
@@ -117,12 +117,12 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>>
ToSimpleGraph(const HeteroGraphPtr graph);
/*!
 * @brief Remove edges from a graph.
 *
 * @param graph The graph.
 * @param eids The edge IDs to remove per edge type.
 *
 * @return A pair of the graph with edges removed, as well as the edge ID
 * mapping from the original graph to the new graph per edge type.
 */
std::pair<HeteroGraphPtr, std::vector<IdArray>> RemoveEdges(
...
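A hedged usage sketch; the leading parameters of RemoveEdges are elided above, so the argument order follows the parameter docs:

```cpp
// Remove the given edge IDs (per edge type) and keep the old->new mapping.
std::pair<HeteroGraphPtr, std::vector<IdArray>> PruneSketch(
    HeteroGraphPtr graph, const std::vector<IdArray>& eids) {
  return dgl::transform::RemoveEdges(graph, eids);
}
```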
/*!
 * Copyright (c) 2020 by Contributors
 * @file rpc/shared_mem_serializer.h
 * @brief Headers for serializer.
 */
#ifndef DGL_ZEROCOPY_SERIALIZER_H_
#define DGL_ZEROCOPY_SERIALIZER_H_
@@ -63,10 +63,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
  };
  /*!
   * @brief This constructor is for the writing scenario, or for reading on
   * the local machine.
   * @param strm The backing stream to write to or load from.
   * @param send_to_remote Whether this stream will be deserialized at the
   * remote machine or the local machine. If true, the data pointers will be
   * recorded into the buffer list.
   */
@@ -75,9 +75,9 @@ class StreamWithBuffer : public dmlc::SeekStream {
        buffer_list_(),
        send_to_remote_(send_to_remote) {}
  /*!
   * @brief This constructor is for reading from a remote machine.
   * @param strm The stream to zerocopy write to or load from.
   * @param data_ptr_list The list of pointers to reconstruct NDArrays from.
   *
   * For example:
   * std::string blob;
@@ -95,9 +95,9 @@ class StreamWithBuffer : public dmlc::SeekStream {
  }
  /*!
   * @brief Construct a stream backed by a string.
   * @param blob The string to zerocopy write to or load from.
   * @param send_to_remote Whether this stream will be deserialized at the
   * remote machine or the local machine. If true, the data pointers will be
   * recorded into the buffer list.
   */
@@ -106,10 +106,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
        send_to_remote_(send_to_remote) {}
  /*!
   * @brief Construct a stream backed by a buffer.
   * @param p_buffer The buffer pointer.
   * @param size The buffer size.
   * @param send_to_remote Whether this stream will be deserialized at the
   * remote machine or the local machine. If true, the data pointers will be
   * recorded into the buffer list.
   */
@@ -118,10 +118,10 @@ class StreamWithBuffer : public dmlc::SeekStream {
        send_to_remote_(send_to_remote) {}
  /*!
   * @brief Construct a stream backed by a string, and reconstruct NDArrays
   * from data_ptr_list.
   * @param blob The string to zerocopy write to or load from.
   * @param data_ptr_list The list of pointers to reconstruct NDArrays from.
   */
  StreamWithBuffer(std::string* blob, const std::vector<void*>& data_ptr_list)
      : strm_(new dmlc::MemoryStringStream(blob)), send_to_remote_(true) {
@@ -131,11 +131,11 @@ class StreamWithBuffer : public dmlc::SeekStream {
  }
  /*!
   * @brief Construct a stream backed by a buffer, and reconstruct NDArrays
   * from data_ptr_list.
   * @param p_buffer The buffer pointer.
   * @param size The buffer size.
   * @param data_ptr_list The list of pointers to reconstruct NDArrays from.
   */
  StreamWithBuffer(
      char* p_buffer, size_t size, const std::vector<void*>& data_ptr_list)
@@ -156,14 +156,14 @@ class StreamWithBuffer : public dmlc::SeekStream {
  using dmlc::Stream::Write;
  /*!
   * @brief Push an NDArray into the stream.
   * If send_to_remote=true, the NDArray will be saved to the buffer list.
   * If send_to_remote=false, the NDArray will be saved to the backing string.
   */
  void PushNDArray(const runtime::NDArray& tensor);
  /*!
   * @brief Pop an NDArray from the stream.
   * If send_to_remote=true, the NDArray will be reconstructed from the buffer
   * list.
   * If send_to_remote=false, the NDArray will be reconstructed from shared
   * memory
@@ -171,12 +171,12 @@ class StreamWithBuffer : public dmlc::SeekStream {
  dgl::runtime::NDArray PopNDArray();
  /*!
   * @brief Get whether this stream is for remote usage.
   */
  bool send_to_remote() { return send_to_remote_; }
  /*!
   * @brief Get the underlying buffer list.
   */
  const std::deque<Buffer>& buffer_list() const { return buffer_list_; }
...
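A hedged round-trip sketch tying the pieces together; the string-backed constructor taking `send_to_remote` and the `data` field on Buffer are assumptions based on the docs above:

```cpp
#include <string>
#include <vector>

dgl::runtime::NDArray RoundTripSketch(const dgl::runtime::NDArray& tensor) {
  std::string blob;
  StreamWithBuffer writer(&blob, /*send_to_remote=*/true);
  writer.PushNDArray(tensor);  // zero-copy: pointer goes to the buffer list

  std::vector<void*> ptrs;
  for (const auto& buf : writer.buffer_list())  // collect recorded pointers
    ptrs.push_back(buf.data);                   // field name assumed

  StreamWithBuffer reader(&blob, ptrs);  // reading-from-remote constructor
  return reader.PopNDArray();
}
```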
/*!
 * Copyright (c) 2019 by Contributors
 * @file intel/cpu_support.h
 * @brief Intel CPU support
 * @author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
 */
#ifndef INTEL_CPU_SUPPORT_H_
#define INTEL_CPU_SUPPORT_H_
@@ -54,8 +54,8 @@ struct IntelKernel {
};
/*!
 * @brief Element-wise addition kernel using Intel AVX512 instructions.
 * @note It uses AVX512.
 */
template <class Op>
class ElemWiseAddUpdate : public Xbyak::CodeGenerator {
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file intel/meta_utils.h
 * @brief Meta programming utils
 * @author Pawel Piotrowicz <pawel.piotrowicz@intel.com>
 */
#ifndef INTEL_META_UTILS_H_
#define INTEL_META_UTILS_H_
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file api/api_container.cc
 * @brief Runtime container APIs. (reference: tvm/src/api/api_lang.cc)
 */
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
...
/*!
 * Copyright (c) 2022 by Contributors
 * @file api/api_test.cc
 * @brief C APIs for testing FFI
 */
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file array/arith.h
 * @brief Arithmetic functors
 */
#ifndef DGL_ARRAY_ARITH_H_
#define DGL_ARRAY_ARITH_H_
...
/*!
 * Copyright (c) 2019-2021 by Contributors
 * @file array/array.cc
 * @brief DGL array utilities implementation
 */
#include <dgl/array.h>
#include <dgl/graph_traversal.h>
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file array/array_arith.cc
 * @brief DGL array arithmetic operations
 */
#include <dgl/packed_func_ext.h>
#include <dgl/runtime/container.h>
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file array/array_op.h
 * @brief Array operator templates
 */
#ifndef DGL_ARRAY_ARRAY_OP_H_
#define DGL_ARRAY_ARRAY_OP_H_
...
/*!
 * Copyright (c) 2019 by Contributors
 * @file array/check.h
 * @brief DGL check utilities
 */
#ifndef DGL_ARRAY_CHECK_H_
#define DGL_ARRAY_CHECK_H_
...
/*!
 * Copyright (c) 2020 by Contributors
 * @file array/cpu/array_cumsum.cc
 * @brief Array cumsum CPU implementation
 */
#include <dgl/array.h>
...