"git@developer.sourcefind.cn:change/sglang.git" did not exist on "9592a1f3bd07cbe5f826ef0357356df237a3476f"
Unverified Commit bcd37684, authored by Hongzhi (Steve), Chen and committed by GitHub

[Misc] Replace /*! with /**. (#4823)



* replace

* blabla

* balbla

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 619d735d
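Note for reviewers: both comment openers are valid Doxygen documentation markers; `/*!` is the Qt style and `/**` the JavaDoc style, and this commit only standardizes the codebase on the latter. A minimal illustration of the two forms (the function names below are hypothetical, not taken from the diff):

```cpp
// Both blocks are picked up by Doxygen; only the opener differs.

/*!
 * @brief Qt-style documentation block (the form being replaced).
 */
void OldStyle();

/**
 * @brief JavaDoc-style documentation block (the form adopted here).
 */
void NewStyle();
```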
-/*!
+/**
  * Copyright (c) 2019 by Contributors
  * @file tcp_socket.h
  * @brief TCP socket for DGL distributed training.
@@ -19,23 +19,23 @@
 namespace dgl {
 namespace network {
-/*!
+/**
  * @brief TCPSocket is a simple wrapper around a socket.
  * It supports only TCP connections.
  */
 class TCPSocket {
  public:
-  /*!
+  /**
    * @brief TCPSocket constructor
    */
   TCPSocket();
-  /*!
+  /**
    * @brief TCPSocket deconstructor
    */
   ~TCPSocket();
-  /*!
+  /**
    * @brief Connect to a given server address
    * @param ip ip address
    * @param port end port
@@ -43,7 +43,7 @@ class TCPSocket {
    */
   bool Connect(const char* ip, int port);
-  /*!
+  /**
    * @brief Bind on the given IP and PORT
    * @param ip ip address
    * @param port end port
@@ -51,14 +51,14 @@ class TCPSocket {
    */
   bool Bind(const char* ip, int port);
-  /*!
+  /**
    * @brief listen for remote connection
    * @param max_connection maximal connection
    * @return true for success and false for failure
    */
   bool Listen(int max_connection);
-  /*!
+  /**
    * @brief wait doe a new connection
    * @param socket new SOCKET will be stored to socket
    * @param ip_client new IP will be stored to ip_client
@@ -67,7 +67,7 @@ class TCPSocket {
    */
   bool Accept(TCPSocket* socket, std::string* ip_client, int* port_client);
-  /*!
+  /**
    * @brief SetNonBlocking() is needed refering to this example of epoll:
    * http://www.kernel.org/doc/man-pages/online/pages/man4/epoll.4.html
    * @param flag true for nonblocking, false for blocking
@@ -75,13 +75,13 @@ class TCPSocket {
    */
   bool SetNonBlocking(bool flag);
-  /*!
+  /**
    * @brief Set timeout for socket
    * @param timeout seconds timeout
    */
   void SetTimeout(int timeout);
-  /*!
+  /**
    * @brief Shut down one or both halves of the connection.
    * @param ways ways for shutdown
    * If ways is SHUT_RD, further receives are disallowed.
@@ -91,12 +91,12 @@ class TCPSocket {
    */
   bool ShutDown(int ways);
-  /*!
+  /**
    * @brief close socket.
    */
   void Close();
-  /*!
+  /**
    * @brief Send data.
    * @param data data for sending
    * @param len_data length of data
@@ -104,7 +104,7 @@ class TCPSocket {
    */
   int64_t Send(const char* data, int64_t len_data);
-  /*!
+  /**
    * @brief Receive data.
    * @param buffer buffer for receving
    * @param size_buffer size of buffer
@@ -112,14 +112,14 @@ class TCPSocket {
    */
   int64_t Receive(char* buffer, int64_t size_buffer);
-  /*!
+  /**
    * @brief Get socket's file descriptor
    * @return socket's file descriptor
    */
   int Socket() const;
  private:
-  /*!
+  /**
    * @brief socket's file descriptor
    */
   int socket_;
...
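The header above exposes a small blocking-socket API. As a reading aid, here is a hypothetical echo sketch against exactly the signatures shown in the diff; the include path, port, and buffer size are assumptions, not part of the commit:

```cpp
#include <string>

#include "tcp_socket.h"  // assumed include path for the header above

// Hypothetical: accept one client and echo one message back.
void EchoOnce() {
  dgl::network::TCPSocket server;
  server.Bind("127.0.0.1", 50051);  // bind to a local ip/port
  server.Listen(1);                 // allow one pending connection

  dgl::network::TCPSocket conn;
  std::string client_ip;
  int client_port = 0;
  if (server.Accept(&conn, &client_ip, &client_port)) {
    char buffer[64];
    int64_t n = conn.Receive(buffer, sizeof(buffer));  // blocking read
    if (n > 0) conn.Send(buffer, n);                   // echo payload back
    conn.Close();
  }
  server.Close();
}
```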
-/*!
+/**
  * Copyright (c) 2020 by Contributors
  * @file rpc/rpc.cc
  * @brief Implementation of RPC utilities used by both server and client sides.
@@ -382,7 +382,7 @@ DGL_REGISTER_GLOBAL("distributed.rpc._CAPI_DGLRPCMessageGetTensors")
     });
 #if defined(__linux__)
-/*!
+/**
  * @brief The signal handler.
  * @param s signal
  */
...
-/*!
+/**
  * Copyright (c) 2020 by Contributors
  * @file rpc/rpc.h
  * @brief Common headers for remote process call (RPC).
@@ -34,9 +34,9 @@ struct RPCContext;
 // Communicator handler type
 typedef void* CommunicatorHandle;
-/*! @brief Context information for RPC communication */
+/** @brief Context information for RPC communication */
 struct RPCContext {
-  /*!
+  /**
    * @brief Rank of this process.
    *
    * If the process is a client, this is equal to client ID. Otherwise, the
@@ -44,57 +44,57 @@ struct RPCContext {
    */
   int32_t rank = -1;
-  /*!
+  /**
    * @brief Cuurent machine ID
    */
   int32_t machine_id = -1;
-  /*!
+  /**
    * @brief Total number of machines.
    */
   int32_t num_machines = 0;
-  /*!
+  /**
    * @brief Message sequence number.
    */
   std::atomic<int64_t> msg_seq{0};
-  /*!
+  /**
    * @brief Total number of server.
    */
   int32_t num_servers = 0;
-  /*!
+  /**
    * @brief Total number of client.
    */
   int32_t num_clients = 0;
-  /*!
+  /**
    * @brief Current barrier count
    */
   std::unordered_map<int32_t, int32_t> barrier_count;
-  /*!
+  /**
    * @brief Total number of server per machine.
    */
   int32_t num_servers_per_machine = 0;
-  /*!
+  /**
    * @brief Sender communicator.
    */
   std::shared_ptr<RPCSender> sender;
-  /*!
+  /**
    * @brief Receiver communicator.
    */
   std::shared_ptr<RPCReceiver> receiver;
-  /*!
+  /**
    * @brief Tensorpipe global context
    */
   std::shared_ptr<tensorpipe::Context> ctx;
-  /*!
+  /**
    * @brief Server state data.
    *
    * If the process is a server, this stores necessary
@@ -105,20 +105,20 @@ struct RPCContext {
    */
   std::shared_ptr<ServerState> server_state;
-  /*!
+  /**
    * @brief Cuurent group ID
    */
   int32_t group_id = -1;
   int32_t curr_client_id = -1;
   std::unordered_map<int32_t, std::unordered_map<int32_t, int32_t>> clients_;
-  /*! @brief Get the RPC context singleton */
+  /** @brief Get the RPC context singleton */
   static RPCContext* getInstance() {
     static RPCContext ctx;
     return &ctx;
   }
-  /*! @brief Reset the RPC context */
+  /** @brief Reset the RPC context */
   static void Reset() {
     auto* t = getInstance();
     t->rank = -1;
@@ -159,7 +159,7 @@ struct RPCContext {
   }
 };
-/*!
+/**
  * @brief Send out one RPC message.
  *
  * The operation is non-blocking -- it does not guarantee the payloads have
@@ -177,7 +177,7 @@ struct RPCContext {
  */
 RPCStatus SendRPCMessage(const RPCMessage& msg);
-/*!
+/**
  * @brief Receive one RPC message.
  *
  * The operation is blocking -- it returns when it receives any message
...
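`getInstance()` above is a Meyers singleton: the function-local static is constructed once on first use, which is thread-safe since C++11. A short hypothetical access sketch, assuming the include path and using only the fields declared in the diff:

```cpp
#include "rpc/rpc.h"  // assumed include path for the header above

// Hypothetical: inspect and reset the process-wide RPC context.
void ResetStaleContext() {
  auto* ctx = dgl::rpc::RPCContext::getInstance();
  // Per the field docs above, rank stays -1 until this process is
  // assigned a client or server ID.
  if (ctx->rank == -1 && ctx->num_clients == 0) {
    dgl::rpc::RPCContext::Reset();  // restore the default-initialized state
  }
}
```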
-/*!
+/**
  * Copyright (c) 2020 by Contributors
  * @file rpc/rpc_msg.h
  * @brief Common headers for remote process call (RPC).
@@ -16,31 +16,31 @@
 namespace dgl {
 namespace rpc {
-/*! @brief RPC message data structure
+/** @brief RPC message data structure
  *
  * This structure is exposed to Python and can be used as argument or return
  * value in C API.
  */
 struct RPCMessage : public runtime::Object {
-  /*! @brief Service ID */
+  /** @brief Service ID */
   int32_t service_id;
-  /*! @brief Sequence number of this message. */
+  /** @brief Sequence number of this message. */
   int64_t msg_seq;
-  /*! @brief Client ID. */
+  /** @brief Client ID. */
   int32_t client_id;
-  /*! @brief Server ID. */
+  /** @brief Server ID. */
   int32_t server_id;
-  /*! @brief Payload buffer carried by this request.*/
+  /** @brief Payload buffer carried by this request.*/
   std::string data;
-  /*! @brief Extra payloads in the form of tensors.*/
+  /** @brief Extra payloads in the form of tensors.*/
   std::vector<runtime::NDArray> tensors;
-  /*! @brief Group ID. */
+  /** @brief Group ID. */
   int32_t group_id{0};
   bool Load(dmlc::Stream* stream) {
@@ -70,7 +70,7 @@ struct RPCMessage : public runtime::Object {
 DGL_DEFINE_OBJECT_REF(RPCMessageRef, RPCMessage);
-/*! @brief RPC status flag */
+/** @brief RPC status flag */
 enum RPCStatus {
   kRPCSuccess = 0,
   kRPCTimeOut,
...
-/*!
+/**
  * Copyright (c) 2020 by Contributors
  * @file rpc/server_state.h
  * @brief Implementation of RPC utilities used by both server and client sides.
@@ -17,7 +17,7 @@
 namespace dgl {
 namespace rpc {
-/*!
+/**
  * @brief Data stored in one DGL server.
  *
  * In a distributed setting, DGL partitions all data associated with the graph
@@ -39,16 +39,16 @@ namespace rpc {
  * shared memory.
  */
 struct ServerState : public runtime::Object {
-  /*! @brief Key value store for NDArray data */
+  /** @brief Key value store for NDArray data */
   std::unordered_map<std::string, runtime::NDArray> kv_store;
-  /*! @brief Graph structure of one partition */
+  /** @brief Graph structure of one partition */
   HeteroGraphPtr graph;
-  /*! @brief Total number of nodes */
+  /** @brief Total number of nodes */
   int64_t total_num_nodes = 0;
-  /*! @brief Total number of edges */
+  /** @brief Total number of edges */
   int64_t total_num_edges = 0;
   static constexpr const char* _type_key = "server_state.ServerState";
...
-/*
+/**
  * Copyright (c) Facebook, Inc. and its affiliates.
  * All rights reserved.
  *
...
-/*!
+/**
  * Copyright (c) 2019 by Contributors
  * @file tp_communicator.cc
  * @brief Tensorpipe Communicator for DGL distributed training.
...
-/*!
+/**
  * Copyright (c) 2019 by Contributors
  * @file tp_communicator.h
  * @brief Tensorpipe Communicator for DGL distributed training.
@@ -25,14 +25,14 @@ namespace rpc {
 typedef Queue<RPCMessage> RPCMessageQueue;
-/*!
+/**
  * @brief TPSender for DGL distributed training.
  *
  * TPSender is the communicator implemented by tcp socket.
  */
 class TPSender : public RPCSender {
  public:
-  /*!
+  /**
    * @brief Sender constructor
    * @param queue_size size of message queue
    */
@@ -41,12 +41,12 @@ class TPSender : public RPCSender {
     this->context = ctx;
   }
-  /*!
+  /**
    * @brief Sender destructor
    */
   ~TPSender() { Finalize(); }
-  /*!
+  /**
    * @brief Connect to a receiver.
    *
    * When there are multiple receivers to be connected, application will call
@@ -62,19 +62,19 @@ class TPSender : public RPCSender {
    */
   bool ConnectReceiver(const std::string& addr, int recv_id) override;
-  /*!
+  /**
    * @brief Send RPCMessage to specified Receiver.
    * @param msg data message
    * @param recv_id receiver's ID
    */
   void Send(const RPCMessage& msg, int recv_id) override;
-  /*!
+  /**
    * @brief Finalize TPSender
    */
   void Finalize() override;
-  /*!
+  /**
    * @brief Communicator type: 'tp'
    */
   const std::string& NetType() const override {
@@ -83,31 +83,31 @@ class TPSender : public RPCSender {
   }
  private:
-  /*!
+  /**
    * @brief global context of tensorpipe
    */
   std::shared_ptr<tensorpipe::Context> context;
-  /*!
+  /**
    * @brief pipe for each connection of receiver
    */
   std::unordered_map<int /* receiver ID */, std::shared_ptr<tensorpipe::Pipe>>
       pipes_;
-  /*!
+  /**
    * @brief receivers' listening address
    */
   std::unordered_map<int /* receiver ID */, std::string> receiver_addrs_;
 };
-/*!
+/**
  * @brief TPReceiver for DGL distributed training.
  *
  * Tensorpipe Receiver is the communicator implemented by tcp socket.
  */
 class TPReceiver : public RPCReceiver {
  public:
-  /*!
+  /**
    * @brief Receiver constructor
    * @param queue_size size of message queue.
    */
@@ -117,12 +117,12 @@ class TPReceiver : public RPCReceiver {
     queue_ = std::make_shared<RPCMessageQueue>();
   }
-  /*!
+  /**
    * @brief Receiver destructor
    */
   ~TPReceiver() { Finalize(); }
-  /*!
+  /**
    * @brief Wait for all the Senders to connect
    * @param addr Networking address, e.g., 'tcp://127.0.0.1:50051'
    * @param num_sender total number of Senders
@@ -134,7 +134,7 @@ class TPReceiver : public RPCReceiver {
   bool Wait(
       const std::string& addr, int num_sender, bool blocking = true) override;
-  /*!
+  /**
    * @brief Recv RPCMessage from Sender. Actually removing data from queue.
    * @param msg pointer of RPCmessage
    * @param timeout The timeout value in milliseconds. If zero, wait
@@ -143,14 +143,14 @@ class TPReceiver : public RPCReceiver {
    */
   RPCStatus Recv(RPCMessage* msg, int timeout) override;
-  /*!
+  /**
    * @brief Finalize SocketReceiver
    *
    * Finalize() is not thread-safe and only one thread can invoke this API.
    */
   void Finalize() override;
-  /*!
+  /**
    * @brief Communicator type: 'tp' (tensorpipe)
    */
   const std::string& NetType() const override {
@@ -158,7 +158,7 @@ class TPReceiver : public RPCReceiver {
     return net_type;
   }
-  /*!
+  /**
   * @brief Issue a receive request on pipe, and push the result into queue
   */
   static void ReceiveFromPipe(
@@ -166,45 +166,45 @@ class TPReceiver : public RPCReceiver {
       std::shared_ptr<RPCMessageQueue> queue);
  private:
-  /*!
+  /**
    * @brief Callback for new connection is accepted.
    */
   void OnAccepted(const tensorpipe::Error&, std::shared_ptr<tensorpipe::Pipe>);
  private:
-  /*!
+  /**
    * @brief number of sender
    */
   int num_sender_;
-  /*!
+  /**
    * @brief listener to build pipe
    */
   std::shared_ptr<tensorpipe::Listener> listener;
-  /*!
+  /**
    * @brief global context of tensorpipe
    */
   std::shared_ptr<tensorpipe::Context> context;
-  /*!
+  /**
    * @brief pipe for each client connections
    */
   std::unordered_map<
       int /* Sender (virutal) ID */, std::shared_ptr<tensorpipe::Pipe>>
      pipes_;
-  /*!
+  /**
    * @brief RPCMessage queue
   */
   std::shared_ptr<RPCMessageQueue> queue_;
-  /*!
+  /**
    * @brief number of accepted connections
   */
   std::atomic<int32_t> num_connected_{0};
-  /*!
+  /**
   * @brief listner
   */
   std::shared_ptr<tensorpipe::Listener> listener_{nullptr};
...
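For orientation, a hypothetical round trip over the TPSender/TPReceiver interfaces shown above. Construction is omitted because the constructor signatures are collapsed in this diff, and the include paths, address, and IDs are made-up values; the non-blocking `Wait` is an assumption so a single thread can both listen and connect:

```cpp
#include <string>

#include "rpc/rpc_msg.h"          // assumed include path
#include "rpc/tp_communicator.h"  // assumed include path

using namespace dgl::rpc;

// Hypothetical: one sender pings one receiver.
bool RoundTrip(TPSender* sender, TPReceiver* receiver) {
  const std::string addr = "tcp://127.0.0.1:50051";
  // Start listening without blocking so the sender below can connect.
  if (!receiver->Wait(addr, /*num_sender=*/1, /*blocking=*/false)) return false;
  if (!sender->ConnectReceiver(addr, /*recv_id=*/0)) return false;

  RPCMessage ping;
  ping.service_id = 1;    // fields as declared in rpc_msg.h above
  ping.data = "ping";
  sender->Send(ping, /*recv_id=*/0);

  RPCMessage msg;
  // Per the doc above, timeout is in milliseconds; zero waits.
  RPCStatus status = receiver->Recv(&msg, /*timeout=*/0);

  sender->Finalize();
  receiver->Finalize();
  return status == kRPCSuccess;
}
```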
-/*!
+/**
  * Copyright (c) 2016 by Contributors
  * Implementation of C API (reference: tvm/src/api/c_api.cc)
  * @file c_api.cc
@@ -17,19 +17,19 @@
 #include "runtime_base.h"
-/*! @brief entry to to easily hold returning information */
+/** @brief entry to to easily hold returning information */
 struct DGLAPIThreadLocalEntry {
-  /*! @brief result holder for returning strings */
+  /** @brief result holder for returning strings */
   std::vector<std::string> ret_vec_str;
-  /*! @brief result holder for returning string pointers */
+  /** @brief result holder for returning string pointers */
   std::vector<const char*> ret_vec_charp;
-  /*! @brief result holder for retruning string */
+  /** @brief result holder for retruning string */
   std::string ret_str;
 };
 using namespace dgl::runtime;
-/*! @brief Thread local store that can be used to hold return values. */
+/** @brief Thread local store that can be used to hold return values. */
 typedef dmlc::ThreadLocalStore<DGLAPIThreadLocalEntry> DGLAPIThreadLocalStore;
 using DGLAPIObject = std::shared_ptr<Object>;
...
-/*!
+/**
  * Copyright (c) 2016-2022 by Contributors
  * @file c_runtime_api.cc
  * @brief Runtime API implementation
@@ -22,7 +22,7 @@
 namespace dgl {
 namespace runtime {
-/*!
+/**
  * @brief The name of Device API factory.
  * @param type The device type.
  */
...
-/*!
+/**
  * Copyright (c) 2019 by Contributors
  * @file runtime/config.cc
  * @brief DGL runtime config
...
-/*!
+/**
  * Copyright (c) 2016-2022 by Contributors
  * @file cpu_device_api.cc
  */
...
-/*!
+/**
  * Copyright (c) 2017 by Contributors
  * @file cuda_common.h
  * @brief Common utilities for CUDA
@@ -109,7 +109,7 @@ inline const char* curandGetErrorString(curandStatus_t error) {
   return "Unrecognized curand error string";
 }
-/*
+/**
  * @brief Cast data type to cudaDataType_t.
  */
 template <typename T>
@@ -133,7 +133,7 @@ struct cuda_dtype<double> {
 };
 #if CUDART_VERSION >= 11000
-/*
+/**
  * @brief Cast index data type to cusparseIndexType_t.
 */
 template <typename T>
@@ -152,24 +152,24 @@ struct cusparse_idtype<int64_t> {
 };
 #endif
-/*! @brief Thread local workspace */
+/** @brief Thread local workspace */
 class CUDAThreadEntry {
  public:
-  /*! @brief The cusparse handler */
+  /** @brief The cusparse handler */
   cusparseHandle_t cusparse_handle{nullptr};
-  /*! @brief The cublas handler */
+  /** @brief The cublas handler */
   cublasHandle_t cublas_handle{nullptr};
-  /*! @brief The curand generator */
+  /** @brief The curand generator */
   curandGenerator_t curand_gen{nullptr};
-  /*! @brief thread local pool*/
+  /** @brief thread local pool*/
   WorkspacePool pool;
-  /*! @brief constructor */
+  /** @brief constructor */
   CUDAThreadEntry();
   // get the threadlocal workspace
   static CUDAThreadEntry* ThreadLocal();
 };
-/*! @brief Get the current CUDA stream */
+/** @brief Get the current CUDA stream */
 cudaStream_t getCurrentCUDAStream();
 }  // namespace runtime
 }  // namespace dgl
...
-/*!
+/**
  * Copyright (c) 2017-2022 by Contributors
  * @file cuda_device_api.cc
  * @brief GPU specific API
@@ -194,7 +194,7 @@ class CUDADeviceAPI final : public DeviceAPI {
     CUDA_CALL(cudaStreamSynchronize(static_cast<cudaStream_t>(stream)));
   }
-  /*! NOTE: If the backend is PyTorch, we will use PyTorch's stream management,
+  /** NOTE: If the backend is PyTorch, we will use PyTorch's stream management,
    * so just avoid calling our SetStream/CreateStream unless
    * you really need advanced stream control.
    * TODO(Xin): Redirect this to PyTorch or remove it.
@@ -206,7 +206,7 @@ class CUDADeviceAPI final : public DeviceAPI {
     return static_cast<DGLStreamHandle>(getCurrentCUDAStream());
   }
-  /*! NOTE: cudaHostRegister can be called from an arbitrary GPU device,
+  /** NOTE: cudaHostRegister can be called from an arbitrary GPU device,
    * so we don't need to specify a ctx.
    * The pinned memory can be seen by all CUDA contexts,
    * not just the one that performed the allocation
...
-/*!
+/**
  * Copyright (c) 2021 by Contributors
  * @file runtime/cuda/cuda_device_common.cuh
  * @brief Device level functions for within cuda kernels.
...
-/*!
+/**
  * Copyright (c) 2021 by Contributors
  * @file runtime/cuda/cuda_device_common.cuh
  * @brief Device level functions for within cuda kernels.
@@ -19,7 +19,7 @@ namespace cuda {
 template <typename>
 class OrderedHashTable;
-/*!
+/**
  * @brief A device-side handle for a GPU hashtable for mapping items to the
  * first index at which they appear in the provided data array.
  *
@@ -179,7 +179,7 @@ class DeviceOrderedHashTable {
   friend class OrderedHashTable<IdType>;
 };
-/*!
+/**
  * @brief A host-side handle for a GPU hashtable for mapping items to the
  * first index at which they appear in the provided data array. This host-side
  * handle is responsible for allocating and free the GPU memory of the
...
-/*!
+/**
  * Copyright (c) 2021-2022 by Contributors
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
...
-/*!
+/**
  * Copyright (c) 2021-2022 by Contributors
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
...
-/*!
+/**
  * Copyright (c) 2022 by Contributors
  * @file src/runtime/dlpack_convert.cc
  * @brief Conversion between NDArray and DLPack.
...
-/*!
+/**
  * Copyright (c) 2017 by Contributors
  * @file dso_dll_module.cc
  * @brief Module to load from dynamic shared library.
...