Unverified Commit 619d735d authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
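For context on what the diff below changes: Doxygen accepts both the backslash and the at-sign as command prefixes, so \brief and @brief are equivalent; this commit simply standardizes the structured comments on the @ form. A minimal illustration of the target style, using a hypothetical function that is not part of this repository:

/*!
 * @brief Add two integers.
 * @param a The first operand.
 * @param b The second operand.
 * @return The sum of a and b.
 */
int Add(int a, int b) { return a + b; }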
/*!
* Copyright (c) 2017 by Contributors
* \file thread_pool.cc
* \brief Threadpool for multi-threading runtime.
* @file thread_pool.cc
* @brief Threadpool for multi-threading runtime.
*/
#include <dgl/runtime/c_backend_api.h>
#include <dgl/runtime/c_runtime_api.h>
@@ -31,7 +31,7 @@ namespace runtime {
constexpr int kSyncStride = 64 / sizeof(std::atomic<int>);
/*!
* \brief Thread local master environment.
* @brief Thread local master environment.
*/
class ParallelLauncher {
public:
@@ -112,10 +112,10 @@ class ParallelLauncher {
std::vector<std::string> par_errors_;
};
/*! \brief Lock-free single-producer-single-consumer queue for each thread */
/*! @brief Lock-free single-producer-single-consumer queue for each thread */
class SpscTaskQueue {
public:
/*! \brief The task entry */
/*! @brief The task entry */
struct Task {
ParallelLauncher* launcher;
int32_t task_id;
@@ -126,8 +126,8 @@ class SpscTaskQueue {
~SpscTaskQueue() { delete[] buffer_; }
/*!
* \brief Push a task into the queue and notify the consumer if it is on wait.
* \param input The task to be dequeued.
* @brief Push a task into the queue and notify the consumer if it is on wait.
* @param input The task to be dequeued.
*/
void Push(const Task& input) {
while (!Enqueue(input)) {
@@ -140,10 +140,10 @@
}
/*!
* \brief Pop a task out of the queue and condition wait if no tasks.
* \param output The pointer to the task to be dequeued.
* \param spin_count The number of iterations to spin before sleep.
* \return Whether pop is successful (true) or we need to exit now (false).
* @brief Pop a task out of the queue and condition wait if no tasks.
* @param output The pointer to the task to be dequeued.
* @param spin_count The number of iterations to spin before sleep.
* @return Whether pop is successful (true) or we need to exit now (false).
*/
bool Pop(Task* output, uint32_t spin_count = 300000) {
// Busy wait a bit when the queue is empty.
@@ -170,7 +170,7 @@
}
/*!
* \brief Signal to terminate the worker.
* @brief Signal to terminate the worker.
*/
void SignalForKill() {
std::lock_guard<std::mutex> lock(mutex_);
@@ -180,9 +180,9 @@
protected:
/*!
* \brief Lock-free enqueue.
* \param input The task to be enqueued.
* \return Whether the task is enqueued.
* @brief Lock-free enqueue.
* @param input The task to be enqueued.
* @return Whether the task is enqueued.
*/
bool Enqueue(const Task& input) {
if (exit_now_.load(std::memory_order_relaxed)) return false;
......
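The comments touched above describe a single-producer/single-consumer task queue whose consumer busy-spins briefly before falling back to a condition-variable wait. The following is a minimal, self-contained sketch of that pattern, not DGL's SpscTaskQueue; the ring-buffer capacity, type names, and the pending_ counter protocol are illustrative assumptions.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

// Minimal single-producer / single-consumer ring buffer (illustration only).
template <typename T, uint32_t kCapacity = 1024>
class MiniSpscQueue {
 public:
  // Producer: spin until a slot is free, then wake the consumer if it sleeps.
  void Push(const T& input) {
    while (!Enqueue(input)) std::this_thread::yield();
    if (pending_.fetch_add(1) == -1) {
      std::lock_guard<std::mutex> lock(mutex_);
      cv_.notify_one();
    }
  }

  // Consumer: spin for a while, then block on the condition variable.
  bool Pop(T* output, uint32_t spin_count = 300000) {
    for (uint32_t i = 0; i < spin_count && pending_.load() == 0; ++i) {
      std::this_thread::yield();  // busy wait a bit when the queue is empty
    }
    if (pending_.fetch_sub(1) == 0) {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [this] { return pending_.load() >= 0 || exit_.load(); });
    }
    if (exit_.load(std::memory_order_relaxed)) return false;
    const uint32_t head = head_.load(std::memory_order_relaxed);
    *output = buffer_[head % kCapacity];
    head_.store(head + 1, std::memory_order_release);
    return true;
  }

  // Wake a sleeping consumer and tell it to stop.
  void SignalForKill() {
    std::lock_guard<std::mutex> lock(mutex_);
    exit_.store(true);
    cv_.notify_all();
  }

 private:
  // Lock-free enqueue; returns false when the ring is full.
  bool Enqueue(const T& input) {
    const uint32_t tail = tail_.load(std::memory_order_relaxed);
    if (tail - head_.load(std::memory_order_acquire) >= kCapacity) return false;
    buffer_[tail % kCapacity] = input;
    tail_.store(tail + 1, std::memory_order_release);
    return true;
  }

  T buffer_[kCapacity];
  std::atomic<uint32_t> head_{0};
  std::atomic<uint32_t> tail_{0};
  std::atomic<int64_t> pending_{0};  // becomes -1 while the consumer sleeps
  std::atomic<bool> exit_{false};
  std::mutex mutex_;
  std::condition_variable cv_;
};

The spin-then-sleep split documented for Pop keeps dequeue latency low while tasks keep arriving, yet avoids burning a core on the condition variable when the queue stays empty.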
/*!
* Copyright (c) 2017 by Contributors
* \file thread_storage_scope.h
* \brief Extract thread axis configuration from DGLArgs.
* @file thread_storage_scope.h
* @brief Extract thread axis configuration from DGLArgs.
*/
#ifndef DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
#define DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
@@ -15,29 +15,29 @@ namespace dgl {
namespace runtime {
/*!
* \brief Memory hierarchy rank in the storage system
* \note The global rank and shared rank have one to one
* @brief Memory hierarchy rank in the storage system
* @note The global rank and shared rank have one to one
* correspondence to the thread rank.
*/
enum class StorageRank {
/*! \brief global memory */
/*! @brief global memory */
kGlobal = 0,
/*! \brief shared memory among thread group */
/*! @brief shared memory among thread group */
kShared = 1,
/*!
* \brief reserved for warp memory.
* @brief reserved for warp memory.
* This is only used by programming model.
* There is no such memory usually in GPU.
* Instead, we can simulate it by registers and shuffle.
*/
kWarp = 2,
/*! \brief thread local memory */
/*! @brief thread local memory */
kLocal = 3
};
/*!
* \param thread_scope_rank The thread scope rank
* \return default storage rank given the thread scope
* @param thread_scope_rank The thread scope rank
* @return default storage rank given the thread scope
*/
inline StorageRank DefaultStorageRank(int thread_scope_rank) {
switch (thread_scope_rank) {
@@ -54,11 +54,11 @@ inline StorageRank DefaultStorageRank(int thread_scope_rank) {
}
}
/*! \brief class to represent storage scope */
/*! @brief class to represent storage scope */
struct StorageScope {
/*! \brief The rank of the storage */
/*! @brief The rank of the storage */
StorageRank rank{StorageRank::kGlobal};
/*! \brief tag for special purpose memory. */
/*! @brief tag for special purpose memory. */
std::string tag;
// comparator
inline bool operator==(const StorageScope& other) const {
@@ -84,9 +84,9 @@ struct StorageScope {
}
}
/*!
* \brief make storage scope from string
* \param s The string to be parsed.
* \return The storage scope.
* @brief make storage scope from string
* @param s The string to be parsed.
* @return The storage scope.
*/
static StorageScope make(const std::string& s) {
StorageScope r;
@@ -109,16 +109,16 @@
}
};
/*! \brief class to represent thread scope */
/*! @brief class to represent thread scope */
struct ThreadScope {
/*! \brief The rank of thread scope */
/*! @brief The rank of thread scope */
int rank{0};
/*! \brief the dimension index under the rank */
/*! @brief the dimension index under the rank */
int dim_index{0};
/*!
* \brief make storage scope from string
* \param s The string to be parsed.
* \return The storage scope.
* @brief make storage scope from string
* @param s The string to be parsed.
* @return The storage scope.
*/
static ThreadScope make(const std::string& s) {
ThreadScope r;
@@ -139,22 +139,22 @@
}
};
/*! \brief workload specification */
/*! @brief workload specification */
struct ThreadWorkLoad {
// array, first three are thread configuration.
size_t work_size[6];
/*!
* \param i The block dimension.
* \return i-th block dim
* @param i The block dimension.
* @return i-th block dim
*/
inline size_t block_dim(size_t i) const { return work_size[i + 3]; }
/*!
* \param i The grid dimension.
* \return i-th grid dim
* @param i The grid dimension.
* @return i-th grid dim
*/
inline size_t grid_dim(size_t i) const { return work_size[i]; }
};
/*! \brief Thread axis configuration */
/*! @brief Thread axis configuration */
class ThreadAxisConfig {
public:
void Init(size_t base, const std::vector<std::string>& thread_axis_tags) {
@@ -187,11 +187,11 @@ class ThreadAxisConfig {
size_t work_dim() const { return work_dim_; }
private:
/*! \brief base axis */
/*! @brief base axis */
size_t base_;
/*! \brief The worker dimension */
/*! @brief The worker dimension */
size_t work_dim_;
/*! \brief The index mapping. */
/*! @brief The index mapping. */
std::vector<uint32_t> arg_index_map_;
};
......
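To make the ThreadWorkLoad indexing above concrete: grid_dim(i) reads work_size[i] and block_dim(i) reads work_size[i + 3], so the six-element array carries the grid dimensions in slots 0..2 and the block dimensions in slots 3..5. A small self-contained restatement (illustrative, not the DGL header) with assumed example values:

#include <cassert>
#include <cstddef>

// Mirrors the accessors shown in the diff above.
struct WorkLoadSketch {
  size_t work_size[6];  // grid dims in [0..2], block dims in [3..5]
  size_t block_dim(size_t i) const { return work_size[i + 3]; }
  size_t grid_dim(size_t i) const { return work_size[i]; }
};

int main() {
  WorkLoadSketch w{{64, 1, 1, 256, 1, 1}};  // e.g. 64 blocks of 256 threads
  assert(w.grid_dim(0) == 64);
  assert(w.block_dim(0) == 256);
  return 0;
}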
/*!
* Copyright (c) 2018 by Contributors
* \file threading_backend.cc
* \brief Native threading backend
* @file threading_backend.cc
* @brief Native threading backend
*/
#include <dgl/runtime/threading_backend.h>
#include <dmlc/logging.h>
......
/*!
* Copyright (c) 2020 by Contributors
* \file utils.cc
* \brief DGL util functions
* @file utils.cc
* @brief DGL util functions
*/
#include <dgl/aten/coo.h>
......
/*!
* Copyright (c) 2021 by Contributors
* \file ndarray_partition.h
* \brief Operations on partition implemented in CUDA.
* @file ndarray_partition.h
* @brief Operations on partition implemented in CUDA.
*/
#ifndef DGL_RUNTIME_WORKSPACE_H_
......
/*!
* Copyright (c) 2017 by Contributors
* \file workspace_pool.h
* \brief Workspace pool utility.
* @file workspace_pool.h
* @brief Workspace pool utility.
*/
#include "workspace_pool.h"
@@ -108,14 +108,14 @@ class WorkspacePool::Pool {
}
private:
/*! \brief a single entry in the pool */
/*! @brief a single entry in the pool */
struct Entry {
void* data;
size_t size;
};
/*! \brief List of free items, sorted from small to big size */
/*! @brief List of free items, sorted from small to big size */
std::vector<Entry> free_list_;
/*! \brief List of allocated items */
/*! @brief List of allocated items */
std::vector<Entry> allocated_;
};
......
/*!
* Copyright (c) 2017 by Contributors
* \file workspace_pool.h
* \brief Workspace pool utility.
* @file workspace_pool.h
* @brief Workspace pool utility.
*/
#ifndef DGL_RUNTIME_WORKSPACE_POOL_H_
#define DGL_RUNTIME_WORKSPACE_POOL_H_
@@ -14,7 +14,7 @@
namespace dgl {
namespace runtime {
/*!
* \brief A workspace pool to manage
* @brief A workspace pool to manage
*
* \note We have the following assumption about backend temporal
* workspace allocation, and will optimize for such assumption,
@@ -27,34 +27,34 @@ namespace runtime {
class WorkspacePool {
public:
/*!
* \brief Create pool with specific device type and device.
* \param device_type The device type.
* \param device The device API.
* @brief Create pool with specific device type and device.
* @param device_type The device type.
* @param device The device API.
*/
WorkspacePool(DGLDeviceType device_type, std::shared_ptr<DeviceAPI> device);
/*! \brief destructor */
/*! @brief destructor */
~WorkspacePool();
/*!
* \brief Allocate temporal workspace.
* \param ctx The context of allocation.
* \param size The size to be allocated.
* @brief Allocate temporal workspace.
* @param ctx The context of allocation.
* @param size The size to be allocated.
*/
void* AllocWorkspace(DGLContext ctx, size_t size);
/*!
* \brief Free temporal workspace in backend execution.
* @brief Free temporal workspace in backend execution.
*
* \param ctx The context of allocation.
* \param ptr The pointer to be freed.
* @param ctx The context of allocation.
* @param ptr The pointer to be freed.
*/
void FreeWorkspace(DGLContext ctx, void* ptr);
private:
class Pool;
/*! \brief pool of device local array */
/*! @brief pool of device local array */
std::vector<Pool*> array_;
/*! \brief device type this pool support */
/*! @brief device type this pool support */
DGLDeviceType device_type_;
/*! \brief The device API */
/*! @brief The device API */
std::shared_ptr<DeviceAPI> device_;
};
......
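The WorkspacePool comments above describe a classic free-list cache: freed buffers are kept in a list sorted from small to big and handed back to later requests instead of going through the device allocator again. Below is a minimal host-memory sketch of that idea only; it is not DGL's Pool, and it uses plain malloc/free where the real pool delegates to the DeviceAPI.

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Illustrative free-list workspace cache (single device, no thread safety).
class MiniWorkspacePool {
 public:
  void* Alloc(size_t size) {
    // Reuse the smallest cached buffer that is large enough.
    auto it = std::find_if(free_list_.begin(), free_list_.end(),
                           [size](const Entry& e) { return e.size >= size; });
    Entry e;
    if (it != free_list_.end()) {
      e = *it;
      free_list_.erase(it);
    } else {
      e = Entry{std::malloc(size), size};
    }
    allocated_.push_back(e);
    return e.data;
  }

  void Free(void* ptr) {
    // Move the entry back to the free list, keeping it sorted by size.
    auto it = std::find_if(allocated_.begin(), allocated_.end(),
                           [ptr](const Entry& e) { return e.data == ptr; });
    if (it == allocated_.end()) return;
    Entry e = *it;
    allocated_.erase(it);
    auto pos = std::lower_bound(
        free_list_.begin(), free_list_.end(), e,
        [](const Entry& a, const Entry& b) { return a.size < b.size; });
    free_list_.insert(pos, e);
  }

  ~MiniWorkspacePool() {
    for (const Entry& e : free_list_) std::free(e.data);
    for (const Entry& e : allocated_) std::free(e.data);
  }

 private:
  struct Entry { void* data; size_t size; };
  std::vector<Entry> free_list_;   // sorted from small to big size
  std::vector<Entry> allocated_;
};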
/*!
* Copyright (c) 2018 by Contributors
* \file scheduler/scheduler.cc
* \brief DGL Scheduler implementation
* @file scheduler/scheduler.cc
* @brief DGL Scheduler implementation
*/
#include <dgl/scheduler.h>
......
/*!
* Copyright (c) 2018 by Contributors
* \file scheduler/scheduler_apis.cc
* \brief DGL scheduler APIs
* @file scheduler/scheduler_apis.cc
* @brief DGL scheduler APIs
*/
#include <dgl/array.h>
#include <dgl/graph.h>
......
/*!
* Copyright (c) 2020-2022 by Contributors
* \file tensoradapter.h
* \brief Header file for functions exposed by the adapter library.
* @file tensoradapter.h
* @brief Header file for functions exposed by the adapter library.
*
* Functions in this library must be exported with extern "C" so that DGL can
* locate them with dlsym(3) (or GetProcAddress on Windows).
@@ -19,50 +19,50 @@ namespace tensoradapter {
extern "C" {
/*!
* \brief Allocate a piece of CPU memory via
* @brief Allocate a piece of CPU memory via
* PyTorch's CPUAllocator
*
* \param nbytes The size to be allocated.
* \return Pointer to the allocated memory.
* @param nbytes The size to be allocated.
* @return Pointer to the allocated memory.
*/
void* CPURawAlloc(size_t nbytes);
/*!
* \brief Free the CPU memory.
* @brief Free the CPU memory.
*
* \param ptr Pointer to the memory to be freed.
* @param ptr Pointer to the memory to be freed.
*/
void CPURawDelete(void* ptr);
#ifdef DGL_USE_CUDA
/*!
* \brief Allocate a piece of GPU memory via
* @brief Allocate a piece of GPU memory via
* PyTorch's THCCachingAllocator.
*
* \param nbytes The size to be allocated.
* \param stream The stream to be allocated on.
* \return Pointer to the allocated memory.
* @param nbytes The size to be allocated.
* @param stream The stream to be allocated on.
* @return Pointer to the allocated memory.
*/
void* CUDARawAlloc(size_t nbytes, cudaStream_t stream);
/*!
* \brief Free the GPU memory.
* @brief Free the GPU memory.
*
* \param ptr Pointer to the memory to be freed.
* @param ptr Pointer to the memory to be freed.
*/
void CUDARawDelete(void* ptr);
/*!
* \brief Get the current CUDA stream.
* @brief Get the current CUDA stream.
*/
cudaStream_t CUDACurrentStream();
/*!
* \brief Let the caching allocator know which streams are using this tensor.
* @brief Let the caching allocator know which streams are using this tensor.
*
* \param ptr Pointer of the tensor to be recorded.
* \param stream The stream that is using this tensor.
* \param device_id Device of the tensor.
* @param ptr Pointer of the tensor to be recorded.
* @param stream The stream that is using this tensor.
* @param device_id Device of the tensor.
*/
void RecordStream(void* ptr, cudaStream_t stream, int device_id);
#endif // DGL_USE_CUDA
......
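Since the header above states that these functions are exported with extern "C" so DGL can locate them with dlsym(3) (or GetProcAddress on Windows), loading one of them looks roughly like the sketch below. Only the CPURawAlloc signature comes from the header; the shared-library file name is an assumption for illustration.

#include <dlfcn.h>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical library name; the real adapter .so is built per Torch version.
  void* handle = dlopen("libtensoradapter_pytorch.so", RTLD_NOW | RTLD_LOCAL);
  if (!handle) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  // extern "C" keeps the symbol unmangled, so a plain name lookup works.
  using CPURawAllocFn = void* (*)(size_t);
  auto cpu_raw_alloc = reinterpret_cast<CPURawAllocFn>(dlsym(handle, "CPURawAlloc"));
  if (cpu_raw_alloc) {
    void* buf = cpu_raw_alloc(1024);  // 1 KiB via PyTorch's CPU allocator
    std::printf("allocated %p\n", buf);
    // A real program would release this through the matching CPURawDelete.
  }
  dlclose(handle);
  return 0;  // link with -ldl on older glibc
}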
/*!
* Copyright (c) 2020 by Contributors
* \file tensoradapter_exports.h
* \brief Header file for functions exposed by the adapter library.
* @file tensoradapter_exports.h
* @brief Header file for functions exposed by the adapter library.
*/
#ifndef TENSORADAPTER_EXPORTS_H_
......
/*!
* Copyright (c) 2020-2022 by Contributors
* \file torch/torch.cpp
* \brief Implementation of PyTorch adapter library.
* @file torch/torch.cpp
* @brief Implementation of PyTorch adapter library.
*/
#include <c10/core/CPUAllocator.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file graph_index_test.cc
* \brief Test GraphIndex
* @file graph_index_test.cc
* @brief Test GraphIndex
*/
#include <dgl/graph.h>
#include <gtest/gtest.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file msg_queue.cc
* \brief Message queue for DGL distributed training.
* @file msg_queue.cc
* @brief Message queue for DGL distributed training.
*/
#include <gtest/gtest.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file socket_communicator_test.cc
* \brief Test SocketCommunicator
* @file socket_communicator_test.cc
* @brief Test SocketCommunicator
*/
#include "../src/rpc/network/socket_communicator.h"
......
/*!
* Copyright (c) 2019 by Contributors
* \file string_test.cc
* \brief Test String Common
* @file string_test.cc
* @brief Test String Common
*/
#include <gtest/gtest.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file test_unit_graph.cc
* \brief Test UnitGraph
* @file test_unit_graph.cc
* @brief Test UnitGraph
*/
#include <dgl/array.h>
#include <dgl/immutable_graph.h>
......