Unverified Commit 619d735d authored by Hongzhi (Steve), Chen; committed by GitHub

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
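
For context: Doxygen treats the backslash and at-sign command prefixes as equivalent, so \brief, \param, and \return behave exactly like @brief, @param, and @return; this commit only swaps the prefix for consistency. A minimal before/after sketch (illustrative only, the Add function is hypothetical and not taken from the diff below):

// Old style
/*!
 * \brief Add two integers.
 * \param a The first addend.
 * \param b The second addend.
 * \return The sum of a and b.
 */
int Add(int a, int b);

// New style, as applied throughout this commit
/*!
 * @brief Add two integers.
 * @param a The first addend.
 * @param b The second addend.
 * @return The sum of a and b.
 */
int Add(int a, int b);
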
 /*!
  * Copyright (c) 2017 by Contributors
- * \file thread_pool.cc
- * \brief Threadpool for multi-threading runtime.
+ * @file thread_pool.cc
+ * @brief Threadpool for multi-threading runtime.
  */
 #include <dgl/runtime/c_backend_api.h>
 #include <dgl/runtime/c_runtime_api.h>
@@ -31,7 +31,7 @@ namespace runtime {
 constexpr int kSyncStride = 64 / sizeof(std::atomic<int>);
 /*!
- * \brief Thread local master environment.
+ * @brief Thread local master environment.
  */
 class ParallelLauncher {
  public:
@@ -112,10 +112,10 @@ class ParallelLauncher {
   std::vector<std::string> par_errors_;
 };
-/*! \brief Lock-free single-producer-single-consumer queue for each thread */
+/*! @brief Lock-free single-producer-single-consumer queue for each thread */
 class SpscTaskQueue {
  public:
-  /*! \brief The task entry */
+  /*! @brief The task entry */
   struct Task {
     ParallelLauncher* launcher;
     int32_t task_id;
@@ -126,8 +126,8 @@ class SpscTaskQueue {
   ~SpscTaskQueue() { delete[] buffer_; }
   /*!
-   * \brief Push a task into the queue and notify the comsumer if it is on wait.
-   * \param input The task to be dequeued.
+   * @brief Push a task into the queue and notify the comsumer if it is on wait.
+   * @param input The task to be dequeued.
    */
   void Push(const Task& input) {
     while (!Enqueue(input)) {
@@ -140,10 +140,10 @@ class SpscTaskQueue {
   }
   /*!
-   * \brief Pop a task out of the queue and condition wait if no tasks.
-   * \param output The pointer to the task to be dequeued.
-   * \param spin_count The number of iterations to spin before sleep.
-   * \return Whether pop is successful (true) or we need to exit now (false).
+   * @brief Pop a task out of the queue and condition wait if no tasks.
+   * @param output The pointer to the task to be dequeued.
+   * @param spin_count The number of iterations to spin before sleep.
+   * @return Whether pop is successful (true) or we need to exit now (false).
    */
   bool Pop(Task* output, uint32_t spin_count = 300000) {
     // Busy wait a bit when the queue is empty.
@@ -170,7 +170,7 @@ class SpscTaskQueue {
   }
   /*!
-   * \brief Signal to terminate the worker.
+   * @brief Signal to terminate the worker.
    */
   void SignalForKill() {
     std::lock_guard<std::mutex> lock(mutex_);
@@ -180,9 +180,9 @@
  protected:
   /*!
-   * \brief Lock-free enqueue.
-   * \param input The task to be enqueued.
-   * \return Whether the task is enqueued.
+   * @brief Lock-free enqueue.
+   * @param input The task to be enqueued.
+   * @return Whether the task is enqueued.
    */
   bool Enqueue(const Task& input) {
     if (exit_now_.load(std::memory_order_relaxed)) return false;
...
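
As a rough illustration of the contract documented above: Push wakes a sleeping consumer, Pop spins briefly and then condition-waits, and SignalForKill makes Pop return false so the worker can exit. A hypothetical consumer loop over this interface might look like the sketch below (WorkerLoop and RunTask are placeholders, not part of the file):

// Hypothetical consumer loop over SpscTaskQueue; Pop()'s return value is what
// distinguishes "got a task" from "told to shut down via SignalForKill()".
void WorkerLoop(SpscTaskQueue* queue) {
  SpscTaskQueue::Task task;
  while (queue->Pop(&task)) {              // spins, then sleeps until Push() or kill
    RunTask(task.launcher, task.task_id);  // placeholder for the actual task body
  }
  // Pop() returned false: SignalForKill() was called, so the worker exits.
}
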
 /*!
  * Copyright (c) 2017 by Contributors
- * \file thread_storage_scope.h
- * \brief Extract thread axis configuration from DGLArgs.
+ * @file thread_storage_scope.h
+ * @brief Extract thread axis configuration from DGLArgs.
  */
 #ifndef DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
 #define DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
@@ -15,29 +15,29 @@ namespace dgl {
 namespace runtime {
 /*!
- * \brief Memory hierachy rank in the storage system
- * \note The global rank and shared rank have one to one
+ * @brief Memory hierachy rank in the storage system
+ * @note The global rank and shared rank have one to one
  * correspondence to the thread rank.
  */
 enum class StorageRank {
-  /*! \brief global memory */
+  /*! @brief global memory */
   kGlobal = 0,
-  /*! \brief shared memory among thread group */
+  /*! @brief shared memory among thread group */
   kShared = 1,
   /*!
-   * \brief reserved for warp memory.
+   * @brief reserved for warp memory.
    * This is only used by programming model.
    * There is no such memory usually in GPU.
    * Instead, we can simulate it by registers and shuffle.
    */
   kWarp = 2,
-  /*! \brief thread local memory */
+  /*! @brief thread local memory */
   kLocal = 3
 };
 /*!
- * \param thread_scope_rank The thread scope rank
- * \return default storage rank given the thread scope
+ * @param thread_scope_rank The thread scope rank
+ * @return default storage rank given the thread scope
  */
 inline StorageRank DefaultStorageRank(int thread_scope_rank) {
   switch (thread_scope_rank) {
@@ -54,11 +54,11 @@ inline StorageRank DefaultStorageRank(int thread_scope_rank) {
   }
 }
-/*! \brief class to represent storage scope */
+/*! @brief class to represent storage scope */
 struct StorageScope {
-  /*! \brief The rank of the storage */
+  /*! @brief The rank of the storage */
   StorageRank rank{StorageRank::kGlobal};
-  /*! \brief tag for special purpose memory. */
+  /*! @brief tag for special purpose memory. */
   std::string tag;
   // comparator
   inline bool operator==(const StorageScope& other) const {
@@ -84,9 +84,9 @@ struct StorageScope {
     }
   }
   /*!
-   * \brief make storage scope from string
-   * \param s The string to be parsed.
-   * \return The storage scope.
+   * @brief make storage scope from string
+   * @param s The string to be parsed.
+   * @return The storage scope.
    */
   static StorageScope make(const std::string& s) {
     StorageScope r;
@@ -109,16 +109,16 @@
   }
 };
-/*! \brief class to represent thread scope */
+/*! @brief class to represent thread scope */
 struct ThreadScope {
-  /*! \brief The rank of thread scope */
+  /*! @brief The rank of thread scope */
   int rank{0};
-  /*! \brief the dimension index under the rank */
+  /*! @brief the dimension index under the rank */
   int dim_index{0};
   /*!
-   * \brief make storage scope from string
-   * \param s The string to be parsed.
-   * \return The storage scope.
+   * @brief make storage scope from string
+   * @param s The string to be parsed.
+   * @return The storage scope.
    */
   static ThreadScope make(const std::string& s) {
     ThreadScope r;
@@ -139,22 +139,22 @@ struct ThreadScope {
   }
 };
-/*! \brief workload speccification */
+/*! @brief workload speccification */
 struct ThreadWorkLoad {
   // array, first three are thread configuration.
   size_t work_size[6];
   /*!
-   * \param i The block dimension.
-   * \return i-th block dim
+   * @param i The block dimension.
+   * @return i-th block dim
    */
   inline size_t block_dim(size_t i) const { return work_size[i + 3]; }
   /*!
-   * \param i The grid dimension.
-   * \return i-th grid dim
+   * @param i The grid dimension.
+   * @return i-th grid dim
   */
   inline size_t grid_dim(size_t i) const { return work_size[i]; }
 };
-/*! \brief Thread axis configuration */
+/*! @brief Thread axis configuration */
 class ThreadAxisConfig {
  public:
   void Init(size_t base, const std::vector<std::string>& thread_axis_tags) {
@@ -187,11 +187,11 @@ class ThreadAxisConfig {
   size_t work_dim() const { return work_dim_; }
  private:
-  /*! \brief base axis */
+  /*! @brief base axis */
   size_t base_;
-  /*! \brief The worker dimension */
+  /*! @brief The worker dimension */
   size_t work_dim_;
-  /*! \brief The index mapping. */
+  /*! @brief The index mapping. */
   std::vector<uint32_t> arg_index_map_;
 };
...
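
To make the ThreadWorkLoad indexing above concrete: work_size holds six entries, grid_dim(i) reads work_size[i] and block_dim(i) reads work_size[i + 3]. A small sketch with made-up numbers (the function name and values are hypothetical):

#include <algorithm>  // std::copy

void WorkLoadExample() {
  ThreadWorkLoad wl;
  // Hypothetical launch configuration: grid 128 x 4 x 1, block 256 x 1 x 1.
  const size_t dims[6] = {128, 4, 1, 256, 1, 1};
  std::copy(dims, dims + 6, wl.work_size);
  // wl.grid_dim(0) == 128, wl.grid_dim(1) == 4, wl.grid_dim(2) == 1
  // wl.block_dim(0) == 256, wl.block_dim(1) == 1, wl.block_dim(2) == 1
}
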
 /*!
  * Copyright (c) 2018 by Contributors
- * \file threading_backend.cc
- * \brief Native threading backend
+ * @file threading_backend.cc
+ * @brief Native threading backend
  */
 #include <dgl/runtime/threading_backend.h>
 #include <dmlc/logging.h>
...
 /*!
  * Copyright (c) 2020 by Contributors
- * \file utils.cc
- * \brief DGL util functions
+ * @file utils.cc
+ * @brief DGL util functions
  */
 #include <dgl/aten/coo.h>
...
 /*!
  * Copyright (c) 2021 by Contributors
- * \file ndarray_partition.h
- * \brief Operations on partition implemented in CUDA.
+ * @file ndarray_partition.h
+ * @brief Operations on partition implemented in CUDA.
  */
 #ifndef DGL_RUNTIME_WORKSPACE_H_
...
 /*!
  * Copyright (c) 2017 by Contributors
- * \file workspace_pool.h
- * \brief Workspace pool utility.
+ * @file workspace_pool.h
+ * @brief Workspace pool utility.
  */
 #include "workspace_pool.h"
@@ -108,14 +108,14 @@ class WorkspacePool::Pool {
   }
  private:
-  /*! \brief a single entry in the pool */
+  /*! @brief a single entry in the pool */
   struct Entry {
     void* data;
     size_t size;
   };
-  /*! \brief List of free items, sorted from small to big size */
+  /*! @brief List of free items, sorted from small to big size */
   std::vector<Entry> free_list_;
-  /*! \brief List of allocated items */
+  /*! @brief List of allocated items */
   std::vector<Entry> allocated_;
 };
...
 /*!
  * Copyright (c) 2017 by Contributors
- * \file workspace_pool.h
- * \brief Workspace pool utility.
+ * @file workspace_pool.h
+ * @brief Workspace pool utility.
  */
 #ifndef DGL_RUNTIME_WORKSPACE_POOL_H_
 #define DGL_RUNTIME_WORKSPACE_POOL_H_
@@ -14,7 +14,7 @@
 namespace dgl {
 namespace runtime {
 /*!
- * \brief A workspace pool to manage
+ * @brief A workspace pool to manage
  *
  * \note We have the following assumption about backend temporal
  * workspace allocation, and will optimize for such assumption,
@@ -27,34 +27,34 @@ namespace runtime {
 class WorkspacePool {
  public:
   /*!
-   * \brief Create pool with specific device type and device.
-   * \param device_type The device type.
-   * \param device The device API.
+   * @brief Create pool with specific device type and device.
+   * @param device_type The device type.
+   * @param device The device API.
    */
   WorkspacePool(DGLDeviceType device_type, std::shared_ptr<DeviceAPI> device);
-  /*! \brief destructor */
+  /*! @brief destructor */
   ~WorkspacePool();
   /*!
-   * \brief Allocate temporal workspace.
-   * \param ctx The context of allocation.
-   * \param size The size to be allocated.
+   * @brief Allocate temporal workspace.
+   * @param ctx The context of allocation.
+   * @param size The size to be allocated.
    */
   void* AllocWorkspace(DGLContext ctx, size_t size);
   /*!
-   * \brief Free temporal workspace in backend execution.
+   * @brief Free temporal workspace in backend execution.
    *
-   * \param ctx The context of allocation.
-   * \param ptr The pointer to be freed.
+   * @param ctx The context of allocation.
+   * @param ptr The pointer to be freed.
    */
   void FreeWorkspace(DGLContext ctx, void* ptr);
  private:
   class Pool;
-  /*! \brief pool of device local array */
+  /*! @brief pool of device local array */
   std::vector<Pool*> array_;
-  /*! \brief device type this pool support */
+  /*! @brief device type this pool support */
   DGLDeviceType device_type_;
-  /*! \brief The device API */
+  /*! @brief The device API */
   std::shared_ptr<DeviceAPI> device_;
 };
...
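
A minimal sketch of how the Alloc/Free pair above is meant to be used, assuming workspace memory is short-lived and returned in the same scope (the caller function and buffer size here are hypothetical):

// Hypothetical caller: grab a temporary buffer from the pool, use it for an
// intermediate result, and hand it straight back.
void ScratchExample(WorkspacePool* pool, DGLContext ctx) {
  void* scratch = pool->AllocWorkspace(ctx, 1 << 20);  // 1 MiB temporary buffer
  // ... fill and consume the scratch buffer ...
  pool->FreeWorkspace(ctx, scratch);                   // return it to the pool
}
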
 /*!
  * Copyright (c) 2018 by Contributors
- * \file scheduler/scheduler.cc
- * \brief DGL Scheduler implementation
+ * @file scheduler/scheduler.cc
+ * @brief DGL Scheduler implementation
  */
 #include <dgl/scheduler.h>
...
 /*!
  * Copyright (c) 2018 by Contributors
- * \file scheduler/scheduler_apis.cc
- * \brief DGL scheduler APIs
+ * @file scheduler/scheduler_apis.cc
+ * @brief DGL scheduler APIs
  */
 #include <dgl/array.h>
 #include <dgl/graph.h>
...
 /*!
  * Copyright (c) 2020-2022 by Contributors
- * \file tensoradapter.h
- * \brief Header file for functions exposed by the adapter library.
+ * @file tensoradapter.h
+ * @brief Header file for functions exposed by the adapter library.
  *
  * Functions in this library must be exported with extern "C" so that DGL can
  * locate them with dlsym(3) (or GetProcAddress on Windows).
@@ -19,50 +19,50 @@ namespace tensoradapter {
 extern "C" {
 /*!
- * \brief Allocate a piece of CPU memory via
+ * @brief Allocate a piece of CPU memory via
  * PyTorch's CPUAllocator
  *
- * \param nbytes The size to be allocated.
- * \return Pointer to the allocated memory.
+ * @param nbytes The size to be allocated.
+ * @return Pointer to the allocated memory.
  */
 void* CPURawAlloc(size_t nbytes);
 /*!
- * \brief Free the CPU memory.
+ * @brief Free the CPU memory.
  *
- * \param ptr Pointer to the memory to be freed.
+ * @param ptr Pointer to the memory to be freed.
  */
 void CPURawDelete(void* ptr);
 #ifdef DGL_USE_CUDA
 /*!
- * \brief Allocate a piece of GPU memory via
+ * @brief Allocate a piece of GPU memory via
  * PyTorch's THCCachingAllocator.
  *
- * \param nbytes The size to be allocated.
- * \param stream The stream to be allocated on.
- * \return Pointer to the allocated memory.
+ * @param nbytes The size to be allocated.
+ * @param stream The stream to be allocated on.
+ * @return Pointer to the allocated memory.
  */
 void* CUDARawAlloc(size_t nbytes, cudaStream_t stream);
 /*!
- * \brief Free the GPU memory.
+ * @brief Free the GPU memory.
  *
- * \param ptr Pointer to the memory to be freed.
+ * @param ptr Pointer to the memory to be freed.
  */
 void CUDARawDelete(void* ptr);
 /*!
- * \brief Get the current CUDA stream.
+ * @brief Get the current CUDA stream.
  */
 cudaStream_t CUDACurrentStream();
 /*!
- * \brief Let the caching allocator know which streams are using this tensor.
+ * @brief Let the caching allocator know which streams are using this tensor.
  *
- * \param ptr Pointer of the tensor to be recorded.
- * \param stream The stream that is using this tensor.
- * \param device_id Device of the tensor.
+ * @param ptr Pointer of the tensor to be recorded.
+ * @param stream The stream that is using this tensor.
+ * @param device_id Device of the tensor.
  */
 void RecordStream(void* ptr, cudaStream_t stream, int device_id);
 #endif  // DGL_USE_CUDA
...
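
Since the header above stresses that these entry points are extern "C" precisely so DGL can locate them with dlsym(3), here is a hedged sketch of such a lookup. The library path is a placeholder (the real adapter library name depends on the build), and error handling is minimal:

#include <dlfcn.h>
#include <cstddef>
#include <cstdio>

typedef void* (*CPURawAllocFn)(size_t);
typedef void (*CPURawDeleteFn)(void*);

int main() {
  // Placeholder path for the adapter shared library.
  void* handle = dlopen("./libtensoradapter_pytorch.so", RTLD_NOW | RTLD_LOCAL);
  if (!handle) { std::fprintf(stderr, "dlopen failed: %s\n", dlerror()); return 1; }
  auto alloc = reinterpret_cast<CPURawAllocFn>(dlsym(handle, "CPURawAlloc"));
  auto dealloc = reinterpret_cast<CPURawDeleteFn>(dlsym(handle, "CPURawDelete"));
  if (alloc && dealloc) {
    void* p = alloc(64);  // 64 bytes via PyTorch's CPUAllocator
    dealloc(p);           // free through the same library
  }
  dlclose(handle);
  return 0;
}
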
 /*!
  * Copyright (c) 2020 by Contributors
- * \file tensoradapter_exports.h
- * \brief Header file for functions exposed by the adapter library.
+ * @file tensoradapter_exports.h
+ * @brief Header file for functions exposed by the adapter library.
  */
 #ifndef TENSORADAPTER_EXPORTS_H_
...
 /*!
  * Copyright (c) 2020-2022 by Contributors
- * \file torch/torch.cpp
- * \brief Implementation of PyTorch adapter library.
+ * @file torch/torch.cpp
+ * @brief Implementation of PyTorch adapter library.
  */
 #include <c10/core/CPUAllocator.h>
...
 /*!
  * Copyright (c) 2019 by Contributors
- * \file graph_index_test.cc
- * \brief Test GraphIndex
+ * @file graph_index_test.cc
+ * @brief Test GraphIndex
  */
 #include <dgl/graph.h>
 #include <gtest/gtest.h>
...
 /*!
  * Copyright (c) 2019 by Contributors
- * \file msg_queue.cc
- * \brief Message queue for DGL distributed training.
+ * @file msg_queue.cc
+ * @brief Message queue for DGL distributed training.
  */
 #include <gtest/gtest.h>
...
 /*!
  * Copyright (c) 2019 by Contributors
- * \file socket_communicator_test.cc
- * \brief Test SocketCommunicator
+ * @file socket_communicator_test.cc
+ * @brief Test SocketCommunicator
  */
 #include "../src/rpc/network/socket_communicator.h"
...
 /*!
  * Copyright (c) 2019 by Contributors
- * \file string_test.cc
- * \brief Test String Common
+ * @file string_test.cc
+ * @brief Test String Common
  */
 #include <gtest/gtest.h>
...
 /*!
  * Copyright (c) 2019 by Contributors
- * \file test_unit_graph.cc
- * \brief Test UnitGraph
+ * @file test_unit_graph.cc
+ * @brief Test UnitGraph
  */
 #include <dgl/array.h>
 #include <dgl/immutable_graph.h>
...