Unverified Commit bcd37684 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* balbla

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
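For reference, Doxygen accepts both the Qt-style /*! opener and the JavaDoc-style /** opener for a documentation block, so this change standardizes the comment style without altering the generated documentation. A minimal sketch of the conversion on a hypothetical Add function (not part of the diff below):

// Qt-style opener used before this change:
//   /*! @brief Add two integers. */
//
// JavaDoc-style opener adopted by this change; the tags inside the block are unchanged:
/** @brief Add two integers. */
int Add(int a, int b) { return a + b; }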
-/*!
+/**
 * Copyright (c) 2017 by Contributors
 * @file thread_storage_scope.h
 * @brief Extract thread axis configuration from DGLArgs.
@@ -14,28 +14,28 @@
namespace dgl {
namespace runtime {
-/*!
+/**
 * @brief Memory hierachy rank in the storage system
 * @note The global rank and shared rank have one to one
 * correspondence to the thread rank.
 */
enum class StorageRank {
-/*! @brief global memory */
+/** @brief global memory */
kGlobal = 0,
-/*! @brief shared memory among thread group */
+/** @brief shared memory among thread group */
kShared = 1,
-/*!
+/**
 * @brief reserved for warp memory.
 * This is only used by programming model.
 * There is no such memory usually in GPU.
 * Instead, we can simulate it by registers and shuffle.
 */
kWarp = 2,
-/*! @brief thread local memory */
+/** @brief thread local memory */
kLocal = 3
};
-/*!
+/**
 * @param thread_scope_rank The thread scope rank
 * @return default storage rank given the thread scope
 */
@@ -54,11 +54,11 @@ inline StorageRank DefaultStorageRank(int thread_scope_rank) {
}
}
-/*! @brief class to represent storage scope */
+/** @brief class to represent storage scope */
struct StorageScope {
-/*! @brief The rank of the storage */
+/** @brief The rank of the storage */
StorageRank rank{StorageRank::kGlobal};
-/*! @brief tag for special purpose memory. */
+/** @brief tag for special purpose memory. */
std::string tag;
// comparator
inline bool operator==(const StorageScope& other) const {
@@ -83,7 +83,7 @@ struct StorageScope {
return "";
}
}
-/*!
+/**
 * @brief make storage scope from string
 * @param s The string to be parsed.
 * @return The storage scope.
@@ -109,13 +109,13 @@ struct StorageScope {
}
};
-/*! @brief class to represent thread scope */
+/** @brief class to represent thread scope */
struct ThreadScope {
-/*! @brief The rank of thread scope */
+/** @brief The rank of thread scope */
int rank{0};
-/*! @brief the dimension index under the rank */
+/** @brief the dimension index under the rank */
int dim_index{0};
-/*!
+/**
 * @brief make storage scope from string
 * @param s The string to be parsed.
 * @return The storage scope.
@@ -139,22 +139,22 @@ struct ThreadScope {
}
};
-/*! @brief workload speccification */
+/** @brief workload speccification */
struct ThreadWorkLoad {
// array, first three are thread configuration.
size_t work_size[6];
-/*!
+/**
 * @param i The block dimension.
 * @return i-th block dim
 */
inline size_t block_dim(size_t i) const { return work_size[i + 3]; }
-/*!
+/**
 * @param i The grid dimension.
 * @return i-th grid dim
 */
inline size_t grid_dim(size_t i) const { return work_size[i]; }
};
-/*! @brief Thread axis configuration */
+/** @brief Thread axis configuration */
class ThreadAxisConfig {
public:
void Init(size_t base, const std::vector<std::string>& thread_axis_tags) {
@@ -187,11 +187,11 @@ class ThreadAxisConfig {
size_t work_dim() const { return work_dim_; }
private:
-/*! @brief base axis */
+/** @brief base axis */
size_t base_;
-/*! @brief The worker dimension */
+/** @brief The worker dimension */
size_t work_dim_;
-/*! @brief The index mapping. */
+/** @brief The index mapping. */
std::vector<uint32_t> arg_index_map_;
};
...
-/*!
+/**
 * Copyright (c) 2018 by Contributors
 * @file threading_backend.cc
 * @brief Native threading backend
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file utils.cc
 * @brief DGL util functions
...
-/*!
+/**
 * Copyright (c) 2021 by Contributors
 * @file ndarray_partition.h
 * @brief Operations on partition implemented in CUDA.
...
-/*!
+/**
 * Copyright (c) 2017 by Contributors
 * @file workspace_pool.h
 * @brief Workspace pool utility.
@@ -108,14 +108,14 @@ class WorkspacePool::Pool {
}
private:
-/*! @brief a single entry in the pool */
+/** @brief a single entry in the pool */
struct Entry {
void* data;
size_t size;
};
-/*! @brief List of free items, sorted from small to big size */
+/** @brief List of free items, sorted from small to big size */
std::vector<Entry> free_list_;
-/*! @brief List of allocated items */
+/** @brief List of allocated items */
std::vector<Entry> allocated_;
};
...
-/*!
+/**
 * Copyright (c) 2017 by Contributors
 * @file workspace_pool.h
 * @brief Workspace pool utility.
@@ -13,7 +13,7 @@
namespace dgl {
namespace runtime {
-/*!
+/**
 * @brief A workspace pool to manage
 *
 * \note We have the following assumption about backend temporal
@@ -26,21 +26,21 @@ namespace runtime {
 */
class WorkspacePool {
public:
-/*!
+/**
 * @brief Create pool with specific device type and device.
 * @param device_type The device type.
 * @param device The device API.
 */
WorkspacePool(DGLDeviceType device_type, std::shared_ptr<DeviceAPI> device);
-/*! @brief destructor */
+/** @brief destructor */
~WorkspacePool();
-/*!
+/**
 * @brief Allocate temporal workspace.
 * @param ctx The context of allocation.
 * @param size The size to be allocated.
 */
void* AllocWorkspace(DGLContext ctx, size_t size);
-/*!
+/**
 * @brief Free temporal workspace in backend execution.
 *
 * @param ctx The context of allocation.
@@ -50,11 +50,11 @@ class WorkspacePool {
private:
class Pool;
-/*! @brief pool of device local array */
+/** @brief pool of device local array */
std::vector<Pool*> array_;
-/*! @brief device type this pool support */
+/** @brief device type this pool support */
DGLDeviceType device_type_;
-/*! @brief The device API */
+/** @brief The device API */
std::shared_ptr<DeviceAPI> device_;
};
...
-/*!
+/**
 * Copyright (c) 2018 by Contributors
 * @file scheduler/scheduler.cc
 * @brief DGL Scheduler implementation
...
-/*!
+/**
 * Copyright (c) 2018 by Contributors
 * @file scheduler/scheduler_apis.cc
 * @brief DGL scheduler APIs
...
-/*!
+/**
 * Copyright (c) 2020-2022 by Contributors
 * @file tensoradapter.h
 * @brief Header file for functions exposed by the adapter library.
@@ -18,7 +18,7 @@ namespace tensoradapter {
extern "C" {
-/*!
+/**
 * @brief Allocate a piece of CPU memory via
 * PyTorch's CPUAllocator
 *
@@ -27,7 +27,7 @@ extern "C" {
 */
void* CPURawAlloc(size_t nbytes);
-/*!
+/**
 * @brief Free the CPU memory.
 *
 * @param ptr Pointer to the memory to be freed.
@@ -35,7 +35,7 @@ void* CPURawAlloc(size_t nbytes);
void CPURawDelete(void* ptr);
#ifdef DGL_USE_CUDA
-/*!
+/**
 * @brief Allocate a piece of GPU memory via
 * PyTorch's THCCachingAllocator.
 *
@@ -45,19 +45,19 @@ void CPURawDelete(void* ptr);
 */
void* CUDARawAlloc(size_t nbytes, cudaStream_t stream);
-/*!
+/**
 * @brief Free the GPU memory.
 *
 * @param ptr Pointer to the memory to be freed.
 */
void CUDARawDelete(void* ptr);
-/*!
+/**
 * @brief Get the current CUDA stream.
 */
cudaStream_t CUDACurrentStream();
-/*!
+/**
 * @brief Let the caching allocator know which streams are using this tensor.
 *
 * @param ptr Pointer of the tensor to be recorded.
...
-/*!
+/**
 * Copyright (c) 2020 by Contributors
 * @file tensoradapter_exports.h
 * @brief Header file for functions exposed by the adapter library.
...
-/*!
+/**
 * Copyright (c) 2020-2022 by Contributors
 * @file torch/torch.cpp
 * @brief Implementation of PyTorch adapter library.
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file graph_index_test.cc
 * @brief Test GraphIndex
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file msg_queue.cc
 * @brief Message queue for DGL distributed training.
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file socket_communicator_test.cc
 * @brief Test SocketCommunicator
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file string_test.cc
 * @brief Test String Common
...
@@ -264,7 +264,7 @@ void _TestConcat(DGLContext ctx) {
template <typename IdType>
void _TestToSimpleCsr(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 0, 0],
 * [1, 0, 0, 1],
 * [1, 1, 1, 1],
@@ -341,7 +341,7 @@ TEST(MatrixTest, TestToSimpleCsr) {
template <typename IdType>
void _TestToSimpleCoo(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 0, 0],
 * [1, 0, 0, 1],
 * [1, 1, 1, 1],
@@ -429,7 +429,7 @@ TEST(MatrixTest, TestToSimpleCoo) {
template <typename IdType>
void _TestDisjointUnionPartitionCoo(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 1],
 * [1, 0, 1],
 * [0, 1, 0]]
@@ -564,7 +564,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCoo) {
template <typename IdType>
void _TestDisjointUnionPartitionCsr(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 1],
 * [1, 0, 1],
 * [0, 1, 0]]
@@ -690,7 +690,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCsr) {
template <typename IdType>
void _TestSliceContiguousChunkCoo(DGLContext ctx) {
-/*
+/**
 * A = [[1, 0, 0, 0],
 * [0, 0, 1, 0],
 * [0, 0, 0, 0]]
@@ -758,7 +758,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCoo) {
template <typename IdType>
void _TestSliceContiguousChunkCsr(DGLContext ctx) {
-/*
+/**
 * A = [[1, 0, 0, 0],
 * [0, 0, 1, 0],
 * [0, 0, 0, 0]]
@@ -825,7 +825,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCsr) {
template <typename IdType>
void _TestMatrixUnionCsr(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 0, 0],
 * [0, 0, 0, 0],
 * [0, 1, 0, 0],
@@ -1016,7 +1016,7 @@ TEST(MatrixUnionTest, TestMatrixUnionCsr) {
template <typename IdType>
void _TestMatrixUnionCoo(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 0, 0],
 * [0, 0, 0, 0],
 * [0, 1, 0, 0],
@@ -1293,7 +1293,7 @@ TEST(ArrayTest, NonZero) {
template <typename IdType>
void _TestLineGraphCOO(DGLContext ctx) {
-/*
+/**
 * A = [[0, 0, 1, 0],
 * [1, 0, 1, 0],
 * [1, 1, 0, 0],
...
-/*!
+/**
 * Copyright (c) 2019 by Contributors
 * @file test_unit_graph.cc
 * @brief Test UnitGraph
@@ -20,7 +20,7 @@ using namespace dgl::runtime;
template <typename IdType>
aten::CSRMatrix CSR1(DGLContext ctx) {
-/*
+/**
 * G = [[0, 0, 1],
 * [1, 0, 1],
 * [0, 1, 0],
@@ -41,7 +41,7 @@ template aten::CSRMatrix CSR1<int64_t>(DGLContext ctx);
template <typename IdType>
aten::COOMatrix COO1(DGLContext ctx) {
-/*
+/**
 * G = [[1, 1, 0],
 * [0, 1, 0]]
 */
@@ -60,7 +60,7 @@ template aten::COOMatrix COO1<int64_t>(DGLContext ctx);
template <typename IdType>
void _TestUnitGraph_InOutDegrees(DGLContext ctx) {
-/*
+/**
 InDegree(s) is available only if COO or CSC formats permitted.
 OutDegree(s) is available only if COO or CSR formats permitted.
 */
...