Unverified commit bcd37684 authored by Hongzhi (Steve) Chen, committed by GitHub

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* balbla

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
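For context: both comment openers touched by this change are valid Doxygen documentation markers. /*! is the Qt-style form and /** is the JavaDoc-style form; the commit simply standardizes on the latter without changing any documented behavior. A minimal illustration (not taken from the diff below):

/*!
 * @brief Qt-style Doxygen block (old form, being replaced).
 */

/**
 * @brief JavaDoc-style Doxygen block (new form used throughout this commit).
 */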
/*!
/**
* Copyright (c) 2019 by Contributors
* @file tcp_socket.h
* @brief TCP socket for DGL distributed training.
......@@ -19,23 +19,23 @@
namespace dgl {
namespace network {
/*!
/**
* @brief TCPSocket is a simple wrapper around a socket.
* It supports only TCP connections.
*/
class TCPSocket {
public:
/*!
/**
* @brief TCPSocket constructor
*/
TCPSocket();
/*!
/**
* @brief TCPSocket destructor
*/
~TCPSocket();
/*!
/**
* @brief Connect to a given server address
* @param ip ip address
* @param port port number
......@@ -43,7 +43,7 @@ class TCPSocket {
*/
bool Connect(const char* ip, int port);
/*!
/**
* @brief Bind on the given IP and PORT
* @param ip ip address
* @param port port number
......@@ -51,14 +51,14 @@ class TCPSocket {
*/
bool Bind(const char* ip, int port);
/*!
/**
* @brief Listen for remote connections
* @param max_connection maximum number of connections
* @return true for success and false for failure
*/
bool Listen(int max_connection);
/*!
/**
* @brief Wait for a new connection
* @param socket new SOCKET will be stored to socket
* @param ip_client new IP will be stored to ip_client
......@@ -67,7 +67,7 @@ class TCPSocket {
*/
bool Accept(TCPSocket* socket, std::string* ip_client, int* port_client);
/*!
/**
* @brief SetNonBlocking() is needed; refer to this epoll example:
* http://www.kernel.org/doc/man-pages/online/pages/man4/epoll.4.html
* @param flag true for nonblocking, false for blocking
......@@ -75,13 +75,13 @@ class TCPSocket {
*/
bool SetNonBlocking(bool flag);
/*!
/**
* @brief Set timeout for socket
* @param timeout timeout in seconds
*/
void SetTimeout(int timeout);
/*!
/**
* @brief Shut down one or both halves of the connection.
* @param ways ways for shutdown
* If ways is SHUT_RD, further receives are disallowed.
......@@ -91,12 +91,12 @@ class TCPSocket {
*/
bool ShutDown(int ways);
/*!
/**
* @brief Close the socket.
*/
void Close();
/*!
/**
* @brief Send data.
* @param data data for sending
* @param len_data length of data
......@@ -104,7 +104,7 @@ class TCPSocket {
*/
int64_t Send(const char* data, int64_t len_data);
/*!
/**
* @brief Receive data.
* @param buffer buffer for receiving
* @param size_buffer size of buffer
......@@ -112,14 +112,14 @@ class TCPSocket {
*/
int64_t Receive(char* buffer, int64_t size_buffer);
/*!
/**
* @brief Get socket's file descriptor
* @return socket's file descriptor
*/
int Socket() const;
private:
/*!
/**
* @brief socket's file descriptor
*/
int socket_;
......
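A brief client-side usage sketch of the TCPSocket wrapper documented above; the address and payload are placeholders, the include path is assumed, and error handling is elided.

#include <string>
#include "tcp_socket.h"   // path assumed; provides dgl::network::TCPSocket

void ClientSketch() {
  dgl::network::TCPSocket sock;
  if (!sock.Connect("127.0.0.1", 50051)) return;  // ip + port, as documented above
  const char msg[] = "hello";
  sock.Send(msg, sizeof(msg));                    // returns bytes actually sent
  char buf[64];
  int64_t got = sock.Receive(buf, sizeof(buf));   // returns bytes actually received
  (void)got;
  sock.Close();
}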
/*!
/**
* Copyright (c) 2020 by Contributors
* @file rpc/rpc.cc
* @brief Implementation of RPC utilities used by both server and client sides.
......@@ -382,7 +382,7 @@ DGL_REGISTER_GLOBAL("distributed.rpc._CAPI_DGLRPCMessageGetTensors")
});
#if defined(__linux__)
/*!
/**
* @brief The signal handler.
* @param s signal
*/
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file rpc/rpc.h
* @brief Common headers for remote process call (RPC).
......@@ -34,9 +34,9 @@ struct RPCContext;
// Communicator handler type
typedef void* CommunicatorHandle;
/*! @brief Context information for RPC communication */
/** @brief Context information for RPC communication */
struct RPCContext {
/*!
/**
* @brief Rank of this process.
*
* If the process is a client, this is equal to client ID. Otherwise, the
......@@ -44,57 +44,57 @@ struct RPCContext {
*/
int32_t rank = -1;
/*!
/**
* @brief Current machine ID
*/
int32_t machine_id = -1;
/*!
/**
* @brief Total number of machines.
*/
int32_t num_machines = 0;
/*!
/**
* @brief Message sequence number.
*/
std::atomic<int64_t> msg_seq{0};
/*!
/**
* @brief Total number of servers.
*/
int32_t num_servers = 0;
/*!
/**
* @brief Total number of clients.
*/
int32_t num_clients = 0;
/*!
/**
* @brief Current barrier count
*/
std::unordered_map<int32_t, int32_t> barrier_count;
/*!
/**
* @brief Total number of servers per machine.
*/
int32_t num_servers_per_machine = 0;
/*!
/**
* @brief Sender communicator.
*/
std::shared_ptr<RPCSender> sender;
/*!
/**
* @brief Receiver communicator.
*/
std::shared_ptr<RPCReceiver> receiver;
/*!
/**
* @brief Tensorpipe global context
*/
std::shared_ptr<tensorpipe::Context> ctx;
/*!
/**
* @brief Server state data.
*
* If the process is a server, this stores necessary
......@@ -105,20 +105,20 @@ struct RPCContext {
*/
std::shared_ptr<ServerState> server_state;
/*!
/**
* @brief Current group ID
*/
int32_t group_id = -1;
int32_t curr_client_id = -1;
std::unordered_map<int32_t, std::unordered_map<int32_t, int32_t>> clients_;
/*! @brief Get the RPC context singleton */
/** @brief Get the RPC context singleton */
static RPCContext* getInstance() {
static RPCContext ctx;
return &ctx;
}
/*! @brief Reset the RPC context */
/** @brief Reset the RPC context */
static void Reset() {
auto* t = getInstance();
t->rank = -1;
......@@ -159,7 +159,7 @@ struct RPCContext {
}
};
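A short sketch of consulting the process-wide context through the singleton accessor shown above; which fields a real call site reads or writes depends on code outside this diff.

void ContextSketch() {
  dgl::rpc::RPCContext* ctx = dgl::rpc::RPCContext::getInstance();
  if (ctx->rank >= 0) {                        // -1 means no rank has been assigned yet
    int64_t seq = ctx->msg_seq.fetch_add(1);   // atomically draw the next message sequence number
    (void)seq;
  }
}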
/*!
/**
* @brief Send out one RPC message.
*
* The operation is non-blocking -- it does not guarantee the payloads have
......@@ -177,7 +177,7 @@ struct RPCContext {
*/
RPCStatus SendRPCMessage(const RPCMessage& msg);
/*!
/**
* @brief Receive one RPC message.
*
* The operation is blocking -- it returns when it receives any message
......
/*!
/**
* Copyright (c) 2020 by Contributors
* @file rpc/rpc_msg.h
* @brief Common headers for remote process call (RPC).
......@@ -16,31 +16,31 @@
namespace dgl {
namespace rpc {
/*! @brief RPC message data structure
/** @brief RPC message data structure
*
* This structure is exposed to Python and can be used as argument or return
* value in C API.
*/
struct RPCMessage : public runtime::Object {
/*! @brief Service ID */
/** @brief Service ID */
int32_t service_id;
/*! @brief Sequence number of this message. */
/** @brief Sequence number of this message. */
int64_t msg_seq;
/*! @brief Client ID. */
/** @brief Client ID. */
int32_t client_id;
/*! @brief Server ID. */
/** @brief Server ID. */
int32_t server_id;
/*! @brief Payload buffer carried by this request.*/
/** @brief Payload buffer carried by this request.*/
std::string data;
/*! @brief Extra payloads in the form of tensors.*/
/** @brief Extra payloads in the form of tensors.*/
std::vector<runtime::NDArray> tensors;
/*! @brief Group ID. */
/** @brief Group ID. */
int32_t group_id{0};
bool Load(dmlc::Stream* stream) {
......@@ -70,7 +70,7 @@ struct RPCMessage : public runtime::Object {
DGL_DEFINE_OBJECT_REF(RPCMessageRef, RPCMessage);
/*! @brief RPC status flag */
/** @brief RPC status flag */
enum RPCStatus {
kRPCSuccess = 0,
kRPCTimeOut,
......
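A sketch of populating the message structure above and handing it to SendRPCMessage (declared in rpc.h earlier in this diff); all field values are placeholders.

void SendSketch() {
  dgl::rpc::RPCMessage msg;
  msg.service_id = 1;                      // which registered service to invoke
  msg.msg_seq = 42;                        // sequence number used to match the reply
  msg.client_id = 0;
  msg.server_id = 3;
  msg.data = "serialized-request-bytes";   // opaque payload buffer
  // msg.tensors may carry NDArray payloads alongside the byte buffer.
  dgl::rpc::RPCStatus status = dgl::rpc::SendRPCMessage(msg);
  (void)status;                            // e.g. kRPCSuccess or kRPCTimeOut, per the enum above
}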
/*!
/**
* Copyright (c) 2020 by Contributors
* @file rpc/server_state.h
* @brief Implementation of RPC utilities used by both server and client sides.
......@@ -17,7 +17,7 @@
namespace dgl {
namespace rpc {
/*!
/**
* @brief Data stored in one DGL server.
*
* In a distributed setting, DGL partitions all data associated with the graph
......@@ -39,16 +39,16 @@ namespace rpc {
* shared memory.
*/
struct ServerState : public runtime::Object {
/*! @brief Key value store for NDArray data */
/** @brief Key value store for NDArray data */
std::unordered_map<std::string, runtime::NDArray> kv_store;
/*! @brief Graph structure of one partition */
/** @brief Graph structure of one partition */
HeteroGraphPtr graph;
/*! @brief Total number of nodes */
/** @brief Total number of nodes */
int64_t total_num_nodes = 0;
/*! @brief Total number of edges */
/** @brief Total number of edges */
int64_t total_num_edges = 0;
static constexpr const char* _type_key = "server_state.ServerState";
......
/*
/**
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file tp_communicator.cc
* @brief Tensorpipe Communicator for DGL distributed training.
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file tp_communicator.h
* @brief Tensorpipe Communicator for DGL distributed training.
......@@ -25,14 +25,14 @@ namespace rpc {
typedef Queue<RPCMessage> RPCMessageQueue;
/*!
/**
* @brief TPSender for DGL distributed training.
*
* TPSender is the communicator implemented by tcp socket.
*/
class TPSender : public RPCSender {
public:
/*!
/**
* @brief Sender constructor
* @param queue_size size of message queue
*/
......@@ -41,12 +41,12 @@ class TPSender : public RPCSender {
this->context = ctx;
}
/*!
/**
* @brief Sender destructor
*/
~TPSender() { Finalize(); }
/*!
/**
* @brief Connect to a receiver.
*
* When there are multiple receivers to be connected, application will call
......@@ -62,19 +62,19 @@ class TPSender : public RPCSender {
*/
bool ConnectReceiver(const std::string& addr, int recv_id) override;
/*!
/**
* @brief Send RPCMessage to specified Receiver.
* @param msg data message
* @param recv_id receiver's ID
*/
void Send(const RPCMessage& msg, int recv_id) override;
/*!
/**
* @brief Finalize TPSender
*/
void Finalize() override;
/*!
/**
* @brief Communicator type: 'tp'
*/
const std::string& NetType() const override {
......@@ -83,31 +83,31 @@ class TPSender : public RPCSender {
}
private:
/*!
/**
* @brief global context of tensorpipe
*/
std::shared_ptr<tensorpipe::Context> context;
/*!
/**
* @brief pipe for each receiver connection
*/
std::unordered_map<int /* receiver ID */, std::shared_ptr<tensorpipe::Pipe>>
pipes_;
/*!
/**
* @brief receivers' listening addresses
*/
std::unordered_map<int /* receiver ID */, std::string> receiver_addrs_;
};
/*!
/**
* @brief TPReceiver for DGL distributed training.
*
* Tensorpipe Receiver is the communicator implemented by tcp socket.
*/
class TPReceiver : public RPCReceiver {
public:
/*!
/**
* @brief Receiver constructor
* @param queue_size size of message queue.
*/
......@@ -117,12 +117,12 @@ class TPReceiver : public RPCReceiver {
queue_ = std::make_shared<RPCMessageQueue>();
}
/*!
/**
* @brief Receiver destructor
*/
~TPReceiver() { Finalize(); }
/*!
/**
* @brief Wait for all the Senders to connect
* @param addr Networking address, e.g., 'tcp://127.0.0.1:50051'
* @param num_sender total number of Senders
......@@ -134,7 +134,7 @@ class TPReceiver : public RPCReceiver {
bool Wait(
const std::string& addr, int num_sender, bool blocking = true) override;
/*!
/**
* @brief Receive an RPCMessage from a Sender; removes the message from the queue.
* @param msg pointer of RPCmessage
* @param timeout The timeout value in milliseconds. If zero, wait
......@@ -143,14 +143,14 @@ class TPReceiver : public RPCReceiver {
*/
RPCStatus Recv(RPCMessage* msg, int timeout) override;
/*!
/**
* @brief Finalize TPReceiver
*
* Finalize() is not thread-safe and only one thread can invoke this API.
*/
void Finalize() override;
/*!
/**
* @brief Communicator type: 'tp' (tensorpipe)
*/
const std::string& NetType() const override {
......@@ -158,7 +158,7 @@ class TPReceiver : public RPCReceiver {
return net_type;
}
/*!
/**
* @brief Issue a receive request on pipe, and push the result into queue
*/
static void ReceiveFromPipe(
......@@ -166,45 +166,45 @@ class TPReceiver : public RPCReceiver {
std::shared_ptr<RPCMessageQueue> queue);
private:
/*!
/**
* @brief Callback invoked when a new connection is accepted.
*/
void OnAccepted(const tensorpipe::Error&, std::shared_ptr<tensorpipe::Pipe>);
private:
/*!
/**
* @brief number of senders
*/
int num_sender_;
/*!
/**
* @brief listener to build pipe
*/
std::shared_ptr<tensorpipe::Listener> listener;
/*!
/**
* @brief global context of tensorpipe
*/
std::shared_ptr<tensorpipe::Context> context;
/*!
/**
* @brief pipe for each client connection
*/
std::unordered_map<
int /* Sender (virtual) ID */, std::shared_ptr<tensorpipe::Pipe>>
pipes_;
/*!
/**
* @brief RPCMessage queue
*/
std::shared_ptr<RPCMessageQueue> queue_;
/*!
/**
* @brief number of accepted connections
*/
std::atomic<int32_t> num_connected_{0};
/*!
/**
* @brief listener
*/
std::shared_ptr<tensorpipe::Listener> listener_{nullptr};
......
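A rough end-to-end sketch of the sender/receiver pair declared above. The constructor arguments are assumptions (only a queue_size parameter and a tensorpipe context assignment are visible in this diff), error handling is elided, and in practice the two halves run in different processes; they are shown together only for brevity.

void PairSketch(std::shared_ptr<tensorpipe::Context> ctx) {
  // Receiver side: listen, then pull one message off the internal queue.
  dgl::rpc::TPReceiver receiver(ctx);                          // constructor arguments assumed
  receiver.Wait("tcp://127.0.0.1:50051", /*num_sender=*/1);    // blocking wait by default
  dgl::rpc::RPCMessage in;
  dgl::rpc::RPCStatus st = receiver.Recv(&in, /*timeout=*/0);  // timeout in ms; see the note above
  (void)st;

  // Sender side: connect to the receiver's address, send, then tear down.
  dgl::rpc::TPSender sender(ctx);                              // constructor arguments assumed
  sender.ConnectReceiver("tcp://127.0.0.1:50051", /*recv_id=*/0);
  dgl::rpc::RPCMessage out;
  sender.Send(out, /*recv_id=*/0);
  sender.Finalize();
}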
/*!
/**
* Copyright (c) 2016 by Contributors
* Implementation of C API (reference: tvm/src/api/c_api.cc)
* @file c_api.cc
......@@ -17,19 +17,19 @@
#include "runtime_base.h"
/*! @brief entry to easily hold returning information */
/** @brief entry to easily hold returning information */
struct DGLAPIThreadLocalEntry {
/*! @brief result holder for returning strings */
/** @brief result holder for returning strings */
std::vector<std::string> ret_vec_str;
/*! @brief result holder for returning string pointers */
/** @brief result holder for returning string pointers */
std::vector<const char*> ret_vec_charp;
/*! @brief result holder for returning string */
/** @brief result holder for returning string */
std::string ret_str;
};
using namespace dgl::runtime;
/*! @brief Thread local store that can be used to hold return values. */
/** @brief Thread local store that can be used to hold return values. */
typedef dmlc::ThreadLocalStore<DGLAPIThreadLocalEntry> DGLAPIThreadLocalStore;
using DGLAPIObject = std::shared_ptr<Object>;
......
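A minimal sketch of how the thread-local entry above is typically used to hand a C string back across the C API boundary; the helper function name is hypothetical, and dmlc::ThreadLocalStore's static Get() accessor is assumed. The sketch would live in the same c_api.cc translation unit as the typedef above.

// Hypothetical helper -- not part of this diff.
const char* StashReturnString(const std::string& s) {
  DGLAPIThreadLocalEntry* e = DGLAPIThreadLocalStore::Get();  // per-thread entry
  e->ret_str = s;              // keep the string alive in thread-local storage
  return e->ret_str.c_str();   // pointer stays valid for the calling thread
}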
/*!
/**
* Copyright (c) 2016-2022 by Contributors
* @file c_runtime_api.cc
* @brief Runtime API implementation
......@@ -22,7 +22,7 @@
namespace dgl {
namespace runtime {
/*!
/**
* @brief The name of Device API factory.
* @param type The device type.
*/
......
/*!
/**
* Copyright (c) 2019 by Contributors
* @file runtime/config.cc
* @brief DGL runtime config
......
/*!
/**
* Copyright (c) 2016-2022 by Contributors
* @file cpu_device_api.cc
*/
......
/*!
/**
* Copyright (c) 2017 by Contributors
* @file cuda_common.h
* @brief Common utilities for CUDA
......@@ -109,7 +109,7 @@ inline const char* curandGetErrorString(curandStatus_t error) {
return "Unrecognized curand error string";
}
/*
/**
* @brief Cast data type to cudaDataType_t.
*/
template <typename T>
......@@ -133,7 +133,7 @@ struct cuda_dtype<double> {
};
#if CUDART_VERSION >= 11000
/*
/**
* @brief Cast index data type to cusparseIndexType_t.
*/
template <typename T>
......@@ -152,24 +152,24 @@ struct cusparse_idtype<int64_t> {
};
#endif
/*! @brief Thread local workspace */
/** @brief Thread local workspace */
class CUDAThreadEntry {
public:
/*! @brief The cusparse handler */
/** @brief The cusparse handler */
cusparseHandle_t cusparse_handle{nullptr};
/*! @brief The cublas handler */
/** @brief The cublas handler */
cublasHandle_t cublas_handle{nullptr};
/*! @brief The curand generator */
/** @brief The curand generator */
curandGenerator_t curand_gen{nullptr};
/*! @brief thread local pool*/
/** @brief thread local pool*/
WorkspacePool pool;
/*! @brief constructor */
/** @brief constructor */
CUDAThreadEntry();
// get the threadlocal workspace
static CUDAThreadEntry* ThreadLocal();
};
/*! @brief Get the current CUDA stream */
/** @brief Get the current CUDA stream */
cudaStream_t getCurrentCUDAStream();
} // namespace runtime
} // namespace dgl
......
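The trait structs above map C++ scalar types to the enum values expected by the cuBLAS/cuSPARSE generic APIs. A sketch of the general pattern (the member name and the exact specializations in DGL's header are not visible in this diff, so distinct names are used here):

// Sketch of a type -> cudaDataType_t trait; requires <library_types.h>
// (pulled in by the cuBLAS/cuSPARSE headers).
template <typename T>
struct cuda_dtype_sketch;            // primary template intentionally undefined

template <>
struct cuda_dtype_sketch<float> {
  static constexpr cudaDataType_t value = CUDA_R_32F;   // 32-bit real
};

template <>
struct cuda_dtype_sketch<double> {
  static constexpr cudaDataType_t value = CUDA_R_64F;   // 64-bit real
};

// Usage: cuda_dtype_sketch<float>::value can be passed where a cudaDataType_t is expected.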
/*!
/**
* Copyright (c) 2017-2022 by Contributors
* @file cuda_device_api.cc
* @brief GPU specific API
......@@ -194,7 +194,7 @@ class CUDADeviceAPI final : public DeviceAPI {
CUDA_CALL(cudaStreamSynchronize(static_cast<cudaStream_t>(stream)));
}
/*! NOTE: If the backend is PyTorch, we will use PyTorch's stream management,
/** NOTE: If the backend is PyTorch, we will use PyTorch's stream management,
* so just avoid calling our SetStream/CreateStream unless
* you really need advanced stream control.
* TODO(Xin): Redirect this to PyTorch or remove it.
......@@ -206,7 +206,7 @@ class CUDADeviceAPI final : public DeviceAPI {
return static_cast<DGLStreamHandle>(getCurrentCUDAStream());
}
/*! NOTE: cudaHostRegister can be called from an arbitrary GPU device,
/** NOTE: cudaHostRegister can be called from an arbitrary GPU device,
* so we don't need to specify a ctx.
* The pinned memory can be seen by all CUDA contexts,
* not just the one that performed the allocation
......
/*!
/**
* Copyright (c) 2021 by Contributors
* @file runtime/cuda/cuda_device_common.cuh
* @brief Device-level functions for use within CUDA kernels.
......
/*!
/**
* Copyright (c) 2021 by Contributors
* @file runtime/cuda/cuda_device_common.cuh
* @brief Device-level functions for use within CUDA kernels.
......@@ -19,7 +19,7 @@ namespace cuda {
template <typename>
class OrderedHashTable;
/*!
/**
* @brief A device-side handle for a GPU hashtable for mapping items to the
* first index at which they appear in the provided data array.
*
......@@ -179,7 +179,7 @@ class DeviceOrderedHashTable {
friend class OrderedHashTable<IdType>;
};
/*!
/**
* @brief A host-side handle for a GPU hashtable for mapping items to the
* first index at which they appear in the provided data array. This host-side
* handle is responsible for allocating and freeing the GPU memory of the
......
/*!
/**
* Copyright (c) 2021-2022 by Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
......
/*!
/**
* Copyright (c) 2021-2022 by Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
......
/*!
/**
* Copyright (c) 2022 by Contributors
* @file src/runtime/dlpack_convert.cc
* @brief Conversion between NDArray and DLPack.
......
/*!
/**
* Copyright (c) 2017 by Contributors
* @file dso_dll_module.cc
* @brief Module to load from dynamic shared library.
......