Unverified commit bcd37684 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* balbla

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
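For context, both `/*!` (Qt style) and `/**` (Javadoc style) open a Doxygen documentation block in C++, and Doxygen parses them identically; this commit simply standardizes the codebase on the latter. A minimal, hypothetical illustration (the functions below are not from the diff):

// Qt-style Doxygen opener (the form being removed):
/*! @brief Adds two integers (hypothetical example). */
int add(int a, int b) { return a + b; }

// Javadoc-style Doxygen opener (the form this commit adopts):
/** @brief Adds two integers (hypothetical example). */
int add_v2(int a, int b) { return a + b; }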
/*!
/**
* Copyright (c) 2017 by Contributors
* @file thread_storage_scope.h
* @brief Extract thread axis configuration from DGLArgs.
......@@ -14,28 +14,28 @@
namespace dgl {
namespace runtime {
/*!
/**
 * @brief Memory hierarchy rank in the storage system
* @note The global rank and shared rank have one to one
* correspondence to the thread rank.
*/
enum class StorageRank {
/*! @brief global memory */
/** @brief global memory */
kGlobal = 0,
/*! @brief shared memory among thread group */
/** @brief shared memory among thread group */
kShared = 1,
/*!
/**
* @brief reserved for warp memory.
 * This is only used by the programming model.
 * Such memory usually does not physically exist on the GPU.
 * Instead, we can simulate it with registers and shuffle instructions.
*/
kWarp = 2,
/*! @brief thread local memory */
/** @brief thread local memory */
kLocal = 3
};
/*!
/**
* @param thread_scope_rank The thread scope rank
* @return default storage rank given the thread scope
*/
......@@ -54,11 +54,11 @@ inline StorageRank DefaultStorageRank(int thread_scope_rank) {
}
}
/*! @brief class to represent storage scope */
/** @brief class to represent storage scope */
struct StorageScope {
/*! @brief The rank of the storage */
/** @brief The rank of the storage */
StorageRank rank{StorageRank::kGlobal};
/*! @brief tag for special purpose memory. */
/** @brief tag for special purpose memory. */
std::string tag;
// comparator
inline bool operator==(const StorageScope& other) const {
......@@ -83,7 +83,7 @@ struct StorageScope {
return "";
}
}
/*!
/**
* @brief make storage scope from string
* @param s The string to be parsed.
* @return The storage scope.
......@@ -109,13 +109,13 @@ struct StorageScope {
}
};
/*! @brief class to represent thread scope */
/** @brief class to represent thread scope */
struct ThreadScope {
/*! @brief The rank of thread scope */
/** @brief The rank of thread scope */
int rank{0};
/*! @brief the dimension index under the rank */
/** @brief the dimension index under the rank */
int dim_index{0};
/*!
/**
 * @brief make thread scope from string
 * @param s The string to be parsed.
 * @return The thread scope.
......@@ -139,22 +139,22 @@ struct ThreadScope {
}
};
/*! @brief workload specification */
/** @brief workload specification */
struct ThreadWorkLoad {
// array, first three are thread configuration.
size_t work_size[6];
/*!
/**
* @param i The block dimension.
* @return i-th block dim
*/
inline size_t block_dim(size_t i) const { return work_size[i + 3]; }
/*!
/**
* @param i The grid dimension.
* @return i-th grid dim
*/
inline size_t grid_dim(size_t i) const { return work_size[i]; }
};
/*! @brief Thread axis configuration */
/** @brief Thread axis configuration */
class ThreadAxisConfig {
public:
void Init(size_t base, const std::vector<std::string>& thread_axis_tags) {
......@@ -187,11 +187,11 @@ class ThreadAxisConfig {
size_t work_dim() const { return work_dim_; }
private:
/*! @brief base axis */
/** @brief base axis */
size_t base_;
/*! @brief The worker dimension */
/** @brief The worker dimension */
size_t work_dim_;
/*! @brief The index mapping. */
/** @brief The index mapping. */
std::vector<uint32_t> arg_index_map_;
};
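As a reading aid for the ThreadWorkLoad struct in the hunk above, here is a minimal, self-contained sketch of how its six-element work_size array maps to launch dimensions, based only on the accessors shown (grid_dim(i) reads index i, block_dim(i) reads index i + 3); the concrete numbers are hypothetical.

#include <cstddef>
#include <iostream>

// Simplified mirror of ThreadWorkLoad from thread_storage_scope.h.
struct WorkLoadSketch {
  size_t work_size[6];  // indices 0-2: grid dims, indices 3-5: block dims
  size_t grid_dim(size_t i) const { return work_size[i]; }
  size_t block_dim(size_t i) const { return work_size[i + 3]; }
};

int main() {
  WorkLoadSketch w{{128, 4, 1, 256, 1, 1}};  // hypothetical launch configuration
  std::cout << "grid.x = " << w.grid_dim(0)
            << ", block.x = " << w.block_dim(0) << "\n";  // prints 128 and 256
  return 0;
}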
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file threading_backend.cc
* @brief Native threading backend
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file utils.cc
* @brief DGL util functions
......
/*!
/**
* Copyright (c) 2021 by Contributors
* @file ndarray_partition.h
* @brief Operations on partition implemented in CUDA.
......
/*!
/**
* Copyright (c) 2017 by Contributors
* @file workspace_pool.h
* @brief Workspace pool utility.
......@@ -108,14 +108,14 @@ class WorkspacePool::Pool {
}
private:
/*! @brief a single entry in the pool */
/** @brief a single entry in the pool */
struct Entry {
void* data;
size_t size;
};
/*! @brief List of free items, sorted from small to big size */
/** @brief List of free items, sorted from small to big size */
std::vector<Entry> free_list_;
/*! @brief List of allocated items */
/** @brief List of allocated items */
std::vector<Entry> allocated_;
};
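To illustrate the bookkeeping hinted at by WorkspacePool::Pool above (an Entry of data and size, a free_list_ kept sorted from small to large, and an allocated_ list), here is a simplified, self-contained sketch of the free-list idea. It is not DGL's implementation: it uses plain malloc/free instead of a DeviceAPI, and the reuse policy (smallest free block that fits) is an assumption.

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <vector>

// Simplified free-list workspace pool, loosely modeled on the Entry /
// free_list_ / allocated_ members shown in the diff above.
class PoolSketch {
 public:
  ~PoolSketch() {
    for (const Entry& e : free_list_) std::free(e.data);
  }

  void* Alloc(size_t size) {
    // Reuse the first (i.e. smallest) free entry that is large enough.
    auto it = std::find_if(free_list_.begin(), free_list_.end(),
                           [&](const Entry& e) { return e.size >= size; });
    Entry e;
    if (it != free_list_.end()) {
      e = *it;
      free_list_.erase(it);
    } else {
      e = Entry{std::malloc(size), size};
    }
    allocated_.push_back(e);
    return e.data;
  }

  void Free(void* ptr) {
    // Move the entry back to the free list, keeping it sorted by size.
    auto it = std::find_if(allocated_.begin(), allocated_.end(),
                           [&](const Entry& e) { return e.data == ptr; });
    if (it == allocated_.end()) return;
    Entry e = *it;
    allocated_.erase(it);
    auto pos = std::lower_bound(
        free_list_.begin(), free_list_.end(), e,
        [](const Entry& a, const Entry& b) { return a.size < b.size; });
    free_list_.insert(pos, e);
  }

 private:
  struct Entry {
    void* data;
    size_t size;
  };
  std::vector<Entry> free_list_;  // free items, sorted from small to big size
  std::vector<Entry> allocated_;  // currently allocated items
};

int main() {
  PoolSketch pool;
  void* a = pool.Alloc(1024);
  pool.Free(a);
  void* b = pool.Alloc(512);  // reuses the 1024-byte block from the free list
  std::cout << (a == b ? "reused\n" : "fresh\n");
  pool.Free(b);
  return 0;
}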
......
/*!
/**
* Copyright (c) 2017 by Contributors
* @file workspace_pool.h
* @brief Workspace pool utility.
......@@ -13,7 +13,7 @@
namespace dgl {
namespace runtime {
/*!
/**
* @brief A workspace pool to manage
*
* \note We have the following assumption about backend temporal
......@@ -26,21 +26,21 @@ namespace runtime {
*/
class WorkspacePool {
public:
/*!
/**
* @brief Create pool with specific device type and device.
* @param device_type The device type.
* @param device The device API.
*/
WorkspacePool(DGLDeviceType device_type, std::shared_ptr<DeviceAPI> device);
/*! @brief destructor */
/** @brief destructor */
~WorkspacePool();
/*!
/**
 * @brief Allocate a temporary workspace.
* @param ctx The context of allocation.
* @param size The size to be allocated.
*/
void* AllocWorkspace(DGLContext ctx, size_t size);
/*!
/**
 * @brief Free a temporary workspace in backend execution.
*
* @param ctx The context of allocation.
......@@ -50,11 +50,11 @@ class WorkspacePool {
private:
class Pool;
/*! @brief pool of device local array */
/** @brief pool of device local array */
std::vector<Pool*> array_;
/*! @brief device type this pool support */
/** @brief device type this pool support */
DGLDeviceType device_type_;
/*! @brief The device API */
/** @brief The device API */
std::shared_ptr<DeviceAPI> device_;
};
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file scheduler/scheduler.cc
* @brief DGL Scheduler implementation
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file scheduler/scheduler_apis.cc
* @brief DGL scheduler APIs
......
/*!
/**
* Copyright (c) 2020-2022 by Contributors
* @file tensoradapter.h
* @brief Header file for functions exposed by the adapter library.
......@@ -18,7 +18,7 @@ namespace tensoradapter {
extern "C" {
/*!
/**
* @brief Allocate a piece of CPU memory via
* PyTorch's CPUAllocator
*
......@@ -27,7 +27,7 @@ extern "C" {
*/
void* CPURawAlloc(size_t nbytes);
/*!
/**
* @brief Free the CPU memory.
*
* @param ptr Pointer to the memory to be freed.
......@@ -35,7 +35,7 @@ void* CPURawAlloc(size_t nbytes);
void CPURawDelete(void* ptr);
#ifdef DGL_USE_CUDA
/*!
/**
* @brief Allocate a piece of GPU memory via
* PyTorch's THCCachingAllocator.
*
......@@ -45,19 +45,19 @@ void CPURawDelete(void* ptr);
*/
void* CUDARawAlloc(size_t nbytes, cudaStream_t stream);
/*!
/**
* @brief Free the GPU memory.
*
* @param ptr Pointer to the memory to be freed.
*/
void CUDARawDelete(void* ptr);
/*!
/**
* @brief Get the current CUDA stream.
*/
cudaStream_t CUDACurrentStream();
/*!
/**
* @brief Let the caching allocator know which streams are using this tensor.
*
* @param ptr Pointer of the tensor to be recorded.
......
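The CPU-side entry points above are plain extern "C" functions, so calling them only requires the prototypes shown in the diff. A minimal sketch, assuming the program is linked against the built tensoradapter library (which in turn needs PyTorch available at runtime):

#include <cstddef>

// Prototypes copied from tensoradapter.h as shown above; linking against
// the tensoradapter (PyTorch) library is assumed.
extern "C" void* CPURawAlloc(size_t nbytes);
extern "C" void CPURawDelete(void* ptr);

int main() {
  void* buf = CPURawAlloc(4096);  // allocated through PyTorch's CPUAllocator
  // ... use buf ...
  CPURawDelete(buf);              // returned to PyTorch's allocator
  return 0;
}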
/*!
/**
* Copyright (c) 2020 by Contributors
* @file tensoradapter_exports.h
* @brief Header file for functions exposed by the adapter library.
......
/*!
/**
* Copyright (c) 2020-2022 by Contributors
* @file torch/torch.cpp
* @brief Implementation of PyTorch adapter library.
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file graph_index_test.cc
* @brief Test GraphIndex
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file msg_queue.cc
* @brief Message queue for DGL distributed training.
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file socket_communicator_test.cc
* @brief Test SocketCommunicator
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file string_test.cc
* @brief Test String Common
......
......@@ -264,7 +264,7 @@ void _TestConcat(DGLContext ctx) {
template <typename IdType>
void _TestToSimpleCsr(DGLContext ctx) {
/*
/**
* A = [[0, 0, 0, 0],
* [1, 0, 0, 1],
* [1, 1, 1, 1],
......@@ -341,7 +341,7 @@ TEST(MatrixTest, TestToSimpleCsr) {
template <typename IdType>
void _TestToSimpleCoo(DGLContext ctx) {
/*
/**
* A = [[0, 0, 0, 0],
* [1, 0, 0, 1],
* [1, 1, 1, 1],
......@@ -429,7 +429,7 @@ TEST(MatrixTest, TestToSimpleCoo) {
template <typename IdType>
void _TestDisjointUnionPartitionCoo(DGLContext ctx) {
/*
/**
* A = [[0, 0, 1],
* [1, 0, 1],
* [0, 1, 0]]
......@@ -564,7 +564,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCoo) {
template <typename IdType>
void _TestDisjointUnionPartitionCsr(DGLContext ctx) {
/*
/**
* A = [[0, 0, 1],
* [1, 0, 1],
* [0, 1, 0]]
......@@ -690,7 +690,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCsr) {
template <typename IdType>
void _TestSliceContiguousChunkCoo(DGLContext ctx) {
/*
/**
* A = [[1, 0, 0, 0],
* [0, 0, 1, 0],
* [0, 0, 0, 0]]
......@@ -758,7 +758,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCoo) {
template <typename IdType>
void _TestSliceContiguousChunkCsr(DGLContext ctx) {
/*
/**
* A = [[1, 0, 0, 0],
* [0, 0, 1, 0],
* [0, 0, 0, 0]]
......@@ -825,7 +825,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCsr) {
template <typename IdType>
void _TestMatrixUnionCsr(DGLContext ctx) {
/*
/**
* A = [[0, 0, 0, 0],
* [0, 0, 0, 0],
* [0, 1, 0, 0],
......@@ -1016,7 +1016,7 @@ TEST(MatrixUnionTest, TestMatrixUnionCsr) {
template <typename IdType>
void _TestMatrixUnionCoo(DGLContext ctx) {
/*
/**
* A = [[0, 0, 0, 0],
* [0, 0, 0, 0],
* [0, 1, 0, 0],
......@@ -1293,7 +1293,7 @@ TEST(ArrayTest, NonZero) {
template <typename IdType>
void _TestLineGraphCOO(DGLContext ctx) {
/*
/**
* A = [[0, 0, 1, 0],
* [1, 0, 1, 0],
* [1, 1, 0, 0],
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file test_unit_graph.cc
* @brief Test UnitGraph
......@@ -20,7 +20,7 @@ using namespace dgl::runtime;
template <typename IdType>
aten::CSRMatrix CSR1(DGLContext ctx) {
/*
/**
* G = [[0, 0, 1],
* [1, 0, 1],
* [0, 1, 0],
......@@ -41,7 +41,7 @@ template aten::CSRMatrix CSR1<int64_t>(DGLContext ctx);
template <typename IdType>
aten::COOMatrix COO1(DGLContext ctx) {
/*
/**
* G = [[1, 1, 0],
* [0, 1, 0]]
*/
......@@ -60,7 +60,7 @@ template aten::COOMatrix COO1<int64_t>(DGLContext ctx);
template <typename IdType>
void _TestUnitGraph_InOutDegrees(DGLContext ctx) {
/*
/**
InDegree(s) is available only if COO or CSC formats permitted.
OutDegree(s) is available only if COO or CSR formats permitted.
*/
......