Unverified Commit bcd37684 authored by Hongzhi (Steve), Chen's avatar Hongzhi (Steve), Chen Committed by GitHub
Browse files

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* blabla

* blabla
Co-authored-by: default avatarSteve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
/*!
/**
* Copyright (c) 2020-2022 by Contributors
* @file array/tensordispatch.h
* @brief This file defines the dispatcher of tensor operators to
......@@ -38,7 +38,7 @@
#endif // DGL_USE_CUDA
#include "ndarray.h"
/*!
/**
* @brief Casts a pointer \c entry to a function pointer with signature of \c
* func.
*/
......@@ -47,7 +47,7 @@
namespace dgl {
namespace runtime {
/*!
/**
* @brief Dispatcher that delegates the function calls to framework-specific C++
* APIs.
*
......@@ -55,19 +55,19 @@ namespace runtime {
*/
class TensorDispatcher {
public:
/*! @brief Get the singleton instance. */
/** @brief Get the singleton instance. */
static TensorDispatcher* Global() {
static TensorDispatcher inst;
return &inst;
}
/*! @brief Whether an adapter library is available. */
/** @brief Whether an adapter library is available. */
inline bool IsAvailable() { return available_; }
/*! @brief Load symbols from the given tensor adapter library path. */
/** @brief Load symbols from the given tensor adapter library path. */
bool Load(const char* path_cstr);
/*!
/**
* @brief Allocate a piece of CPU memory via PyTorch's CPUAllocator.
* Used in CPUDeviceAPI::AllocWorkspace().
*
......@@ -79,7 +79,7 @@ class TensorDispatcher {
return FUNCCAST(tensoradapter::CPURawAlloc, entry)(nbytes);
}
/*!
/**
* @brief Free the CPU memory.
* Used in CPUDeviceAPI::FreeWorkspace().
*
......@@ -91,7 +91,7 @@ class TensorDispatcher {
}
#ifdef DGL_USE_CUDA
/*!
/**
* @brief Allocate a piece of GPU memory via
* PyTorch's THCCachingAllocator.
* Used in CUDADeviceAPI::AllocWorkspace().
......@@ -109,7 +109,7 @@ class TensorDispatcher {
return FUNCCAST(tensoradapter::CUDARawAlloc, entry)(nbytes, stream);
}
/*!
/**
* @brief Free the GPU memory.
* Used in CUDADeviceAPI::FreeWorkspace().
*
......@@ -120,7 +120,7 @@ class TensorDispatcher {
FUNCCAST(tensoradapter::CUDARawDelete, entry)(ptr);
}
/*!
/**
* @brief Find the current PyTorch CUDA stream
* Used in runtime::getCurrentCUDAStream().
*
......@@ -136,7 +136,7 @@ class TensorDispatcher {
}
#endif // DGL_USE_CUDA
/*!
/**
* @brief Record streams that are using this tensor.
* Used in NDArray::RecordStream().
*
......@@ -153,12 +153,12 @@ class TensorDispatcher {
}
private:
/*! @brief ctor */
/** @brief ctor */
TensorDispatcher() = default;
/*! @brief dtor */
/** @brief dtor */
~TensorDispatcher();
/*!
/**
* @brief List of symbols in the adapter library.
*
* Must match the functions in tensoradapter/include/tensoradapter.h.
......@@ -170,7 +170,7 @@ class TensorDispatcher {
#endif // DGL_USE_CUDA
};
/*! @brief Index of each function to the symbol list */
/** @brief Index of each function to the symbol list */
class Op {
public:
static constexpr int kCPURawAlloc = 0;
......@@ -183,10 +183,10 @@ class TensorDispatcher {
#endif // DGL_USE_CUDA
};
/*! @brief Number of functions */
/** @brief Number of functions */
static constexpr int num_entries_ = sizeof(names_) / sizeof(names_[0]);
/*! @brief Entrypoints of each function */
/** @brief Entrypoints of each function */
void* entrypoints_[num_entries_] = {
nullptr, nullptr,
#ifdef DGL_USE_CUDA
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file dgl/runtime/threading_backend.h
* @brief Utilities for manipulating thread pool threads.
......@@ -14,7 +14,7 @@ namespace dgl {
namespace runtime {
namespace threading {
/*!
/**
* @brief A platform-agnostic abstraction for managing a collection of
* thread pool threads.
*/
......@@ -22,7 +22,7 @@ class ThreadGroup {
public:
class Impl;
/*!
/**
* @brief Creates a collection of threads which run a provided function.
*
* @param num_workers The total number of worker threads in this group.
......@@ -39,7 +39,7 @@ class ThreadGroup {
bool exclude_worker0 = false);
~ThreadGroup();
/*!
/**
* @brief Blocks until all non-main threads in the pool finish.
*/
void Join();
......@@ -49,7 +49,7 @@ class ThreadGroup {
kLittle = -1,
};
/*!
/**
* @brief configure the CPU id affinity
*
* @param mode The preferred CPU type (1 = big, -1 = little).
......@@ -67,14 +67,14 @@ class ThreadGroup {
Impl* impl_;
};
/*!
/**
* @brief Platform-agnostic no-op.
*/
// This used to be Yield(), renaming to YieldThread() because windows.h defined
// it as a macro in later SDKs.
void YieldThread();
/*!
/**
* @return the maximum number of effective workers for this system.
*/
int MaxConcurrency();
......
/*!
/**
* Copyright (c) 2017 by Contributors
* @file dgl/runtime/util.h
* @brief Useful runtime util.
......@@ -11,7 +11,7 @@
namespace dgl {
namespace runtime {
/*!
/**
* @brief Check whether type matches the given spec.
* @param t The type
* @param code The type code.
......@@ -28,7 +28,7 @@ inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) {
namespace dgl {
namespace ir {
namespace intrinsic {
/*! @brief The kind of structure field info used in intrinsic */
/** @brief The kind of structure field info used in intrinsic */
enum DGLStructFieldKind : int {
// array head address
kArrAddr,
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file dgl/sampler.h
* @brief DGL sampler header.
......@@ -20,7 +20,7 @@ class ImmutableGraph;
class SamplerOp {
public:
/*!
/**
* @brief Sample a graph from the seed vertices with neighbor sampling.
* The neighbors are sampled with a uniform distribution.
*
......@@ -39,7 +39,7 @@ class SamplerOp {
const std::string &edge_type, int num_hops, int expand_factor,
const bool add_self_loop, const ValueType *probability);
/*!
/**
* @brief Sample a graph from the seed vertices with layer sampling.
* The layers are sampled with a uniform distribution.
*
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file dgl/sampling/negative.h
* @brief Negative sampling.
......@@ -14,7 +14,7 @@
namespace dgl {
namespace sampling {
/*!
/**
* @brief Given an edge type, uniformly sample source-destination pairs that do
* not have an edge in between using rejection sampling.
*
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file dgl/sampling/neighbor.h
* @brief Neighborhood-based sampling.
......@@ -14,7 +14,7 @@
namespace dgl {
namespace sampling {
/*!
/**
* @brief Sample from the neighbors of the given nodes and return the sampled
* edges as a graph.
*
......@@ -47,7 +47,7 @@ HeteroSubgraph SampleNeighbors(
const std::vector<FloatArray>& probability,
const std::vector<IdArray>& exclude_edges, bool replace = true);
/*!
/**
* Select the neighbors with k-largest weights on the connecting edges for each
* given node.
*
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file dgl/samplinig/randomwalks.h
* @brief Random walk functions.
......@@ -17,7 +17,7 @@ namespace dgl {
namespace sampling {
/*!
/**
* @brief Metapath-based random walk.
* @param hg The heterograph.
* @param seeds A 1D array of seed nodes, with the type the source type of the
......@@ -37,7 +37,7 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalk(
const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath,
const std::vector<FloatArray> &prob);
/*!
/**
* @brief Metapath-based random walk with restart probability.
* @param hg The heterograph.
* @param seeds A 1D array of seed nodes, with the type the source type of the
......@@ -58,7 +58,7 @@ std::tuple<IdArray, IdArray, TypeArray> RandomWalkWithRestart(
const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath,
const std::vector<FloatArray> &prob, double restart_prob);
/*!
/**
* @brief Metapath-based random walk with stepwise restart probability. Useful
* for PinSAGE-like models.
* @param hg The heterograph.
......
/*!
/**
* Copyright (c) 2018 by Contributors
* @file dgl/scheduler.h
* @brief Operations on graph index.
......@@ -16,7 +16,7 @@ typedef dgl::runtime::NDArray IdArray;
namespace sched {
/*!
/**
* @brief Generate degree bucketing schedule
* @tparam IdType Graph's index data type, can be int32_t or int64_t
* @param msg_ids The edge id for each message
......@@ -35,7 +35,7 @@ template <class IdType>
std::vector<IdArray> DegreeBucketing(
const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids);
/*!
/**
* @brief Generate degree bucketing schedule for group_apply edge
* @tparam IdType Graph's index data type, can be int32_t or int64_t
* @param uids One end vertex of edge by which edges are grouped
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file dgl/transform.h
* @brief DGL graph transformations
......@@ -18,7 +18,7 @@ namespace dgl {
namespace transform {
/*!
/**
* @brief Given a list of graphs, remove the common nodes that do not have
* inbound and outbound edges.
*
......@@ -37,7 +37,7 @@ std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>> CompactGraphs(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve);
/*!
/**
* @brief Convert a graph into a bipartite-structured graph for message passing.
*
* Specifically, we create one node type \c ntype_l on the "left" side and
......@@ -83,7 +83,7 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>> ToBlock(
HeteroGraphPtr graph, const std::vector<IdArray> &rhs_nodes,
bool include_rhs_in_lhs);
/*!
/**
* @brief Convert a multigraph to a simple graph.
*
* @return A triplet of
......@@ -116,7 +116,7 @@ std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>> ToBlock(
std::tuple<HeteroGraphPtr, std::vector<IdArray>, std::vector<IdArray>>
ToSimpleGraph(const HeteroGraphPtr graph);
/*!
/**
* @brief Remove edges from a graph.
*
* @param graph The graph.
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file rpc/shared_mem_serializer.h
* @brief headers for serializer.
......@@ -23,7 +23,7 @@
namespace dgl {
/*!
/**
*
* StreamWithBuffer is backed up by dmlc::MemoryFixedSizeStream or
* dmlc::MemoryStringStream. This class supports serializing and deserializing
......@@ -62,7 +62,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
explicit Buffer(void* data) : data(data) {}
};
/*!
/**
* @brief This constructor is for writing scenario or reading from local
* machine
* @param strm The backup stream to write/load from
......@@ -74,7 +74,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
: strm_(std::move(strm)),
buffer_list_(),
send_to_remote_(send_to_remote) {}
/*!
/**
* @brief This constructor is for reading from remote
* @param strm The stream to write/load from zerocopy write/load
* @param data_ptr_list list of pointer to reconstruct NDArray
......@@ -94,7 +94,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
}
}
/*!
/**
* @brief Construct stream backed up by string
* @param blob The string to write/load from zerocopy write/load
* @param send_to_remote Whether this stream will be deserialized at remote
......@@ -105,7 +105,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
: strm_(new dmlc::MemoryStringStream(blob)),
send_to_remote_(send_to_remote) {}
/*!
/**
* @brief Construct stream backed up by string
* @param p_buffer buffer pointer
* @param size buffer size
......@@ -117,7 +117,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
: strm_(new dmlc::MemoryFixedSizeStream(p_buffer, size)),
send_to_remote_(send_to_remote) {}
/*!
/**
* @brief Construct stream backed up by string, and reconstruct NDArray
* from data_ptr_list
* @param blob The string to write/load from zerocopy write/load
......@@ -130,7 +130,7 @@ class StreamWithBuffer : public dmlc::SeekStream {
}
}
/*!
/**
* @brief Construct stream backed up by string, and reconstruct NDArray
* from data_ptr_list
* @param p_buffer buffer pointer
......@@ -155,14 +155,14 @@ class StreamWithBuffer : public dmlc::SeekStream {
using dmlc::Stream::Read;
using dmlc::Stream::Write;
/*!
/**
* @brief push NDArray into stream
* If send_to_remote=true, the NDArray will be saved to the buffer list
* If send_to_remote=false, the NDArray will be saved to the backedup string
*/
void PushNDArray(const runtime::NDArray& tensor);
/*!
/**
* @brief pop NDArray from stream
* If send_to_remote=true, the NDArray will be reconstructed from buffer list
* If send_to_remote=false, the NDArray will be reconstructed from shared
......@@ -170,12 +170,12 @@ class StreamWithBuffer : public dmlc::SeekStream {
*/
dgl::runtime::NDArray PopNDArray();
/*!
/**
* @brief Get whether this stream is for remote usage
*/
bool send_to_remote() { return send_to_remote_; }
/*!
/**
* @brief Get underlying buffer list
*/
const std::deque<Buffer>& buffer_list() const { return buffer_list_; }
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file intel/cpu_support.h
* @brief Intel CPU support
......@@ -53,7 +53,7 @@ struct IntelKernel {
}
};
/*!
/**
* @brief Element-wise addition kernel using Intel AVX512 instructions.
* @note it uses AVX512.
*/
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file intel/meta_utils.h
* @brief Meta programming utils
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file api/api_container.cc
* @brief Runtime container APIs. (reference: tvm/src/api/api_lang.cc)
......
/*!
/**
* Copyright (c) 2022 by Contributors
* @file api/api_test.cc
* @brief C APIs for testing FFI
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file array/arith.h
* @brief Arithmetic functors
......
/*!
/**
* Copyright (c) 2019-2021 by Contributors
* @file array/array.cc
* @brief DGL array utilities implementation
......@@ -956,7 +956,7 @@ COOToSimple(const COOMatrix& coo) {
const COOMatrix &coalesced_adj = coalesced_result.first;
const IdArray &count = coalesced_result.second;
/*
/**
* eids_shuffled actually already contains the mapping from old edge space to the
* new one:
*
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file array/array_aritch.cc
* @brief DGL array arithmetic operations
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file array/array_op.h
* @brief Array operator templates
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file array/check.h
* @brief DGL check utilities
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file array/cpu/array_cumsum.cc
* @brief Array cumsum CPU implementation
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment