Unverified commit a1d50f0f authored by Lingfan Yu, committed by GitHub

[Refactor] Rename before release (#261)

* include/dgl/runtime

* include

* src/runtime

* src/graph

* src/scheduler

* src

* clean up CMakeLists

* further clean up in cmake

* install commands

* python/dgl/_ffi/_cython

* python/dgl/_ffi/_ctypes

* python/dgl/_ffi

* python/dgl

* some fix

* copyright
parent aabba9d4
/*!
 * Copyright (c) 2017 by Contributors
 * \file module.cc
- * \brief TVM module system
+ * \brief DGL module system
 */
#include <dgl/runtime/module.h>
#include <dgl/runtime/registry.h>
@@ -12,7 +12,7 @@
#include "file_util.h"
#endif

-namespace tvm {
+namespace dgl {
namespace runtime {

void Module::Import(Module other) {
@@ -134,42 +134,42 @@ bool RuntimeEnabled(const std::string& target) {
  return runtime::Registry::Get(f_name) != nullptr;
}

-TVM_REGISTER_GLOBAL("module._Enabled")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._Enabled")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = RuntimeEnabled(args[0]);
  });

-TVM_REGISTER_GLOBAL("module._GetSource")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._GetSource")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = args[0].operator Module()->GetSource(args[1]);
  });

-TVM_REGISTER_GLOBAL("module._ImportsSize")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._ImportsSize")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = static_cast<int64_t>(
        args[0].operator Module()->imports().size());
  });

-TVM_REGISTER_GLOBAL("module._GetImport")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._GetImport")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = args[0].operator Module()->
        imports().at(args[1].operator int());
  });

-TVM_REGISTER_GLOBAL("module._GetTypeKey")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._GetTypeKey")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = std::string(args[0].operator Module()->type_key());
  });

-TVM_REGISTER_GLOBAL("module._LoadFromFile")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._LoadFromFile")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    *ret = Module::LoadFromFile(args[0], args[1]);
  });

-TVM_REGISTER_GLOBAL("module._SaveToFile")
-.set_body([](TVMArgs args, TVMRetValue *ret) {
+DGL_REGISTER_GLOBAL("module._SaveToFile")
+.set_body([](DGLArgs args, DGLRetValue *ret) {
    args[0].operator Module()->
        SaveToFile(args[1], args[2]);
  });

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
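
For orientation after the rename: a global registered through DGL_REGISTER_GLOBAL is looked up by name and invoked with the packed calling convention. A minimal sketch, not part of this commit, using only the registry API that appears in this diff ("module._Enabled" comes from the registration above):

#include <dgl/runtime/registry.h>

// sketch: look up and call a renamed global; returns false if it is unregistered
bool CUDARuntimeEnabled() {
  const dgl::runtime::PackedFunc* f =
      dgl::runtime::Registry::Get("module._Enabled");
  if (f == nullptr) return false;
  return (*f)("cuda");  // the returned DGLRetValue converts to bool
}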
@@ -11,7 +11,7 @@
#include <string>
#include "module_util.h"

-namespace tvm {
+namespace dgl {
namespace runtime {

void ImportModuleBlob(const char* mblob, std::vector<Module>* mlist) {
@@ -45,14 +45,14 @@ void ImportModuleBlob(const char* mblob, std::vector<Module>* mlist) {

PackedFunc WrapPackedFunc(BackendPackedCFunc faddr,
                          const std::shared_ptr<ModuleNode>& sptr_to_self) {
-  return PackedFunc([faddr, sptr_to_self](TVMArgs args, TVMRetValue* rv) {
+  return PackedFunc([faddr, sptr_to_self](DGLArgs args, DGLRetValue* rv) {
    int ret = (*faddr)(
-        const_cast<TVMValue*>(args.values),
+        const_cast<DGLValue*>(args.values),
        const_cast<int*>(args.type_codes),
        args.num_args);
-    CHECK_EQ(ret, 0) << TVMGetLastError();
+    CHECK_EQ(ret, 0) << DGLGetLastError();
  });
}

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
@@ -18,7 +18,7 @@ typedef int (*BackendPackedCFunc)(void* args,
                                  int num_args);
}  // extern "C"

-namespace tvm {
+namespace dgl {
namespace runtime {
/*!
 * \brief Wrap a BackendPackedCFunc to packed function.
@@ -40,22 +40,22 @@ void ImportModuleBlob(const char* mblob, std::vector<Module>* module_list);
 */
template<typename FLookup>
void InitContextFunctions(FLookup flookup) {
-#define TVM_INIT_CONTEXT_FUNC(FuncName) \
+#define DGL_INIT_CONTEXT_FUNC(FuncName) \
  if (auto *fp = reinterpret_cast<decltype(&FuncName)*> \
      (flookup("__" #FuncName))) { \
    *fp = FuncName; \
  }
  // Initialize the functions
-  TVM_INIT_CONTEXT_FUNC(TVMFuncCall);
-  TVM_INIT_CONTEXT_FUNC(TVMAPISetLastError);
-  TVM_INIT_CONTEXT_FUNC(TVMBackendGetFuncFromEnv);
-  TVM_INIT_CONTEXT_FUNC(TVMBackendAllocWorkspace);
-  TVM_INIT_CONTEXT_FUNC(TVMBackendFreeWorkspace);
-  TVM_INIT_CONTEXT_FUNC(TVMBackendParallelLaunch);
-  TVM_INIT_CONTEXT_FUNC(TVMBackendParallelBarrier);
+  DGL_INIT_CONTEXT_FUNC(DGLFuncCall);
+  DGL_INIT_CONTEXT_FUNC(DGLAPISetLastError);
+  DGL_INIT_CONTEXT_FUNC(DGLBackendGetFuncFromEnv);
+  DGL_INIT_CONTEXT_FUNC(DGLBackendAllocWorkspace);
+  DGL_INIT_CONTEXT_FUNC(DGLBackendFreeWorkspace);
+  DGL_INIT_CONTEXT_FUNC(DGLBackendParallelLaunch);
+  DGL_INIT_CONTEXT_FUNC(DGLBackendParallelBarrier);
-#undef TVM_INIT_CONTEXT_FUNC
+#undef DGL_INIT_CONTEXT_FUNC
}

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
#endif  // DGL_RUNTIME_MODULE_UTIL_H_
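
InitContextFunctions patches the __DGLFuncCall-style context slots of a freshly loaded module so they point back at the host runtime. A hedged sketch of a caller, assuming a dlopen-style handle; the lambda plays the role of FLookup:

#include <dlfcn.h>

// sketch: wire the context functions into a dynamically loaded module
void WireContextFunctions(void* lib /* handle returned by dlopen */) {
  dgl::runtime::InitContextFunctions([lib](const char* name) -> void* {
    return dlsym(lib, name);  // nullptr when the module lacks that slot
  });
}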
@@ -12,7 +12,7 @@
// deleter for arrays used by DLPack exporter
extern "C" void NDArrayDLPackDeleter(DLManagedTensor* tensor);

-namespace tvm {
+namespace dgl {
namespace runtime {

inline void VerifyDataType(DLDataType dtype) {
@@ -27,7 +27,7 @@ inline void VerifyDataType(DLDataType dtype) {

inline size_t GetDataSize(const DLTensor& arr) {
  size_t size = 1;
-  for (tvm_index_t i = 0; i < arr.ndim; ++i) {
+  for (dgl_index_t i = 0; i < arr.ndim; ++i) {
    size *= arr.shape[i];
  }
  size *= (arr.dtype.bits * arr.dtype.lanes + 7) / 8;
@@ -43,18 +43,18 @@ inline size_t GetDataAlignment(const DLTensor& arr) {
struct NDArray::Internal {
  // Default deleter for the container
  static void DefaultDeleter(NDArray::Container* ptr) {
-    using tvm::runtime::NDArray;
+    using dgl::runtime::NDArray;
    if (ptr->manager_ctx != nullptr) {
      static_cast<NDArray::Container*>(ptr->manager_ctx)->DecRef();
    } else if (ptr->dl_tensor.data != nullptr) {
-      tvm::runtime::DeviceAPI::Get(ptr->dl_tensor.ctx)->FreeDataSpace(
+      dgl::runtime::DeviceAPI::Get(ptr->dl_tensor.ctx)->FreeDataSpace(
          ptr->dl_tensor.ctx, ptr->dl_tensor.data);
    }
    delete ptr;
  }
  // Deleter for NDArray converted from DLPack
  // This is used from data which is passed from external DLPack(DLManagedTensor)
-  // that are not allocated inside of TVM.
+  // that are not allocated inside of DGL.
  // This enables us to create NDArray from memory allocated by other
  // frameworks that are DLPack compatible
  static void DLPackDeleter(NDArray::Container* ptr) {
@@ -158,11 +158,11 @@ NDArray NDArray::FromDLPack(DLManagedTensor* tensor) {

void NDArray::CopyFromTo(DLTensor* from,
                         DLTensor* to,
-                        TVMStreamHandle stream) {
+                        DGLStreamHandle stream) {
  size_t from_size = GetDataSize(*from);
  size_t to_size = GetDataSize(*to);
  CHECK_EQ(from_size, to_size)
-    << "TVMArrayCopyFromTo: The size must exactly match";
+    << "DGLArrayCopyFromTo: The size must exactly match";
  CHECK(from->ctx.device_type == to->ctx.device_type
        || from->ctx.device_type == kDLCPU
@@ -171,7 +171,7 @@ void NDArray::CopyFromTo(DLTensor* from,
  // Use the context that is *not* a cpu context to get the correct device
  // api manager.
-  TVMContext ctx = from->ctx.device_type != kDLCPU ? from->ctx : to->ctx;
+  DGLContext ctx = from->ctx.device_type != kDLCPU ? from->ctx : to->ctx;
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      from->data, static_cast<size_t>(from->byte_offset),
@@ -180,23 +180,23 @@ void NDArray::CopyFromTo(DLTensor* from,
}

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl

-using namespace tvm::runtime;
+using namespace dgl::runtime;

void NDArrayDLPackDeleter(DLManagedTensor* tensor) {
  static_cast<NDArray::Container*>(tensor->manager_ctx)->DecRef();
  delete tensor;
}

-int TVMArrayAlloc(const tvm_index_t* shape,
+int DGLArrayAlloc(const dgl_index_t* shape,
                  int ndim,
                  int dtype_code,
                  int dtype_bits,
                  int dtype_lanes,
                  int device_type,
                  int device_id,
-                  TVMArrayHandle* out) {
+                  DGLArrayHandle* out) {
  API_BEGIN();
  DLDataType dtype;
  dtype.code = static_cast<uint8_t>(dtype_code);
@@ -210,48 +210,48 @@ int TVMArrayAlloc(const tvm_index_t* shape,
  API_END();
}

-int TVMArrayFree(TVMArrayHandle handle) {
+int DGLArrayFree(DGLArrayHandle handle) {
  API_BEGIN();
  reinterpret_cast<NDArray::Container*>(handle)->DecRef();
  API_END();
}

-int TVMArrayCopyFromTo(TVMArrayHandle from,
-                       TVMArrayHandle to,
-                       TVMStreamHandle stream) {
+int DGLArrayCopyFromTo(DGLArrayHandle from,
+                       DGLArrayHandle to,
+                       DGLStreamHandle stream) {
  API_BEGIN();
  NDArray::CopyFromTo(from, to, stream);
  API_END();
}

-int TVMArrayFromDLPack(DLManagedTensor* from,
-                       TVMArrayHandle* out) {
+int DGLArrayFromDLPack(DLManagedTensor* from,
+                       DGLArrayHandle* out) {
  API_BEGIN();
  *out = NDArray::Internal::MoveAsDLTensor(NDArray::FromDLPack(from));
  API_END();
}

-int TVMArrayToDLPack(TVMArrayHandle from,
+int DGLArrayToDLPack(DGLArrayHandle from,
                     DLManagedTensor** out) {
  API_BEGIN();
  *out = NDArray::Internal::ToDLPack(reinterpret_cast<NDArray::Container*>(from));
  API_END();
}

-void TVMDLManagedTensorCallDeleter(DLManagedTensor* dltensor) {
+void DGLDLManagedTensorCallDeleter(DLManagedTensor* dltensor) {
  (*(dltensor->deleter))(dltensor);
}

-int TVMArrayCopyFromBytes(TVMArrayHandle handle,
+int DGLArrayCopyFromBytes(DGLArrayHandle handle,
                          void* data,
                          size_t nbytes) {
  API_BEGIN();
-  TVMContext cpu_ctx;
+  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
-    << "TVMArrayCopyFromBytes: size mismatch";
+    << "DGLArrayCopyFromBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      data, 0,
      handle->data, static_cast<size_t>(handle->byte_offset),
@@ -259,16 +259,16 @@ int TVMArrayCopyFromBytes(TVMArrayHandle handle,
  API_END();
}

-int TVMArrayCopyToBytes(TVMArrayHandle handle,
+int DGLArrayCopyToBytes(DGLArrayHandle handle,
                        void* data,
                        size_t nbytes) {
  API_BEGIN();
-  TVMContext cpu_ctx;
+  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
-    << "TVMArrayCopyToBytes: size mismatch";
+    << "DGLArrayCopyToBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      handle->data, static_cast<size_t>(handle->byte_offset),
      data, 0,
......
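
The DLPack entry points above are the zero-copy bridge to other frameworks: DGLArrayFromDLPack takes ownership of an incoming DLManagedTensor, and DGLArrayToDLPack exports one whose deleter releases the DGL reference. A rough round-trip sketch (error handling and the external producer elided):

// sketch: DLPack round-trip through the renamed C API
DLManagedTensor* external = /* produced by another DLPack-aware framework */ nullptr;
DGLArrayHandle arr = nullptr;
if (DGLArrayFromDLPack(external, &arr) == 0) {  // DGL now owns `external`
  DLManagedTensor* exported = nullptr;
  DGLArrayToDLPack(arr, &exported);             // share the same buffer back out
  DGLDLManagedTensorCallDeleter(exported);      // consumer invokes the deleter when done
  DGLArrayFree(arr);
}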
/*!
 * Copyright (c) 2017 by Contributors
 * \file pack_args.h
- * \brief Utility to pack TVMArgs to other type-erased fution calling convention.
+ * \brief Utility to pack DGLArgs to other type-erased function calling conventions.
 *
 * Two type-erased function signatures are supported.
 * - cuda_style(void** args, int num_args);
@@ -17,7 +17,7 @@
#include <vector>
#include <cstring>

-namespace tvm {
+namespace dgl {
namespace runtime {
/*!
 * \brief argument union type of 32bit.
@@ -31,42 +31,42 @@ union ArgUnion {
/*!
 * \brief Create a packed function from void addr types.
 *
- * \param f with signiture (TVMArgs args, TVMRetValue* rv, void* void_args)
+ * \param f with signature (DGLArgs args, DGLRetValue* rv, void* void_args)
 * \param arg_types The arguments type information.
 * \tparam F the function type
 *
 * \return The wrapped packed function.
 */
template<typename F>
-inline PackedFunc PackFuncVoidAddr(F f, const std::vector<TVMType>& arg_types);
+inline PackedFunc PackFuncVoidAddr(F f, const std::vector<DGLType>& arg_types);
/*!
 * \brief Create a packed function from a function that packs only the non-buffer arguments.
 *
- * \param f with signiture (TVMArgs args, TVMRetValue* rv, ArgUnion* pack_args)
+ * \param f with signature (DGLArgs args, DGLRetValue* rv, ArgUnion* pack_args)
 * \param arg_types The arguments type information.
 * \tparam F the function type
 *
 * \return The wrapped packed function.
 */
template<typename F>
-inline PackedFunc PackFuncNonBufferArg(F f, const std::vector<TVMType>& arg_types);
+inline PackedFunc PackFuncNonBufferArg(F f, const std::vector<DGLType>& arg_types);
/*!
 * \brief Create a packed function from a function that takes packed arguments.
 *
- * \param f with signature (TVMArgs args, TVMRetValue* rv, void* pack_args, size_t nbytes)
+ * \param f with signature (DGLArgs args, DGLRetValue* rv, void* pack_args, size_t nbytes)
 * \param arg_types The arguments type information.
 * \tparam F the function type
 *
 * \return The wrapped packed function.
 */
template<typename F>
-inline PackedFunc PackFuncPackedArg(F f, const std::vector<TVMType>& arg_types);
+inline PackedFunc PackFuncPackedArg(F f, const std::vector<DGLType>& arg_types);
/*!
 * \brief Extract the number of buffer arguments from the argument types.
 * \param arg_types The argument types.
 * \return number of buffer arguments
 */
-inline size_t NumBufferArgs(const std::vector<TVMType>& arg_types);
+inline size_t NumBufferArgs(const std::vector<DGLType>& arg_types);

// implementation details
namespace detail {
@@ -101,7 +101,7 @@ enum ArgConvertCode {
  HANDLE_TO_HANDLE
};

-inline ArgConvertCode GetArgConvertCode(TVMType t) {
+inline ArgConvertCode GetArgConvertCode(DGLType t) {
  CHECK_EQ(t.lanes, 1U)
    << "Cannot pass vector type argument to device function for now";
  if (t.code == kDLInt) {
@@ -122,7 +122,7 @@ inline ArgConvertCode GetArgConvertCode(TVMType t) {
template<int N, typename F>
inline PackedFunc PackFuncVoidAddr_(F f, const std::vector<ArgConvertCode>& codes) {
  int num_args = static_cast<int>(codes.size());
-  auto ret = [f, codes, num_args](TVMArgs args, TVMRetValue* ret) {
+  auto ret = [f, codes, num_args](DGLArgs args, DGLRetValue* ret) {
    TempArray<void*, N> addr_(num_args);
    TempArray<ArgUnion, N> holder_(num_args);
    void** addr = addr_.data();
@@ -161,7 +161,7 @@ template<int N, typename F>
inline PackedFunc PackFuncNonBufferArg_(
    F f, int base, const std::vector<ArgConvertCode>& codes) {
  int num_args = static_cast<int>(codes.size());
-  auto ret = [f, codes, base, num_args](TVMArgs args, TVMRetValue* ret) {
+  auto ret = [f, codes, base, num_args](DGLArgs args, DGLRetValue* ret) {
    TempArray<ArgUnion, N> holder_(num_args);
    ArgUnion* holder = holder_.data();
    for (int i = 0; i < num_args; ++i) {
@@ -196,11 +196,11 @@ template<int N, typename F>
inline PackedFunc PackFuncPackedArg_(
    F f, const std::vector<ArgConvertCode>& codes) {
  int num_args = static_cast<int>(codes.size());
-  auto ret = [f, codes, num_args](TVMArgs args, TVMRetValue* ret) {
+  auto ret = [f, codes, num_args](DGLArgs args, DGLRetValue* ret) {
    TempArray<uint64_t, N> pack_(num_args);
    int32_t* pack = reinterpret_cast<int32_t*>(pack_.data());
    int32_t* ptr = pack;
-    static_assert(sizeof(TVMValue) == 8, "invariant");
+    static_assert(sizeof(DGLValue) == 8, "invariant");
    static_assert(sizeof(void*) % sizeof(int32_t) == 0, "invariant");
    for (int i = 0; i < num_args; ++i) {
      switch (codes[i]) {
@@ -211,7 +211,7 @@ inline PackedFunc PackFuncPackedArg_(
        }
        case INT64_TO_INT64:
        case FLOAT64_TO_FLOAT64: {
-          std::memcpy(ptr, &args.values[i], sizeof(TVMValue));
+          std::memcpy(ptr, &args.values[i], sizeof(DGLValue));
          ptr += 2;
          break;
        }
@@ -244,7 +244,7 @@ inline PackedFunc PackFuncPackedArg_(
}  // namespace detail

template<typename F>
-inline PackedFunc PackFuncVoidAddr(F f, const std::vector<TVMType>& arg_types) {
+inline PackedFunc PackFuncVoidAddr(F f, const std::vector<DGLType>& arg_types) {
  std::vector<detail::ArgConvertCode> codes(arg_types.size());
  for (size_t i = 0; i < arg_types.size(); ++i) {
    codes[i] = detail::GetArgConvertCode(arg_types[i]);
@@ -260,7 +260,7 @@ inline PackedFunc PackFuncVoidAddr(F f, const std::vector<TVMType>& arg_types) {
  }
}

-inline size_t NumBufferArgs(const std::vector<TVMType>& arg_types) {
+inline size_t NumBufferArgs(const std::vector<DGLType>& arg_types) {
  size_t base = arg_types.size();
  for (size_t i = 0; i < arg_types.size(); ++i) {
    if (arg_types[i].code != kHandle) {
@@ -275,7 +275,7 @@ inline size_t NumBufferArgs(const std::vector<TVMType>& arg_types) {
}

template<typename F>
-inline PackedFunc PackFuncNonBufferArg(F f, const std::vector<TVMType>& arg_types) {
+inline PackedFunc PackFuncNonBufferArg(F f, const std::vector<DGLType>& arg_types) {
  size_t num_buffer = NumBufferArgs(arg_types);
  std::vector<detail::ArgConvertCode> codes;
  for (size_t i = num_buffer; i < arg_types.size(); ++i) {
@@ -292,7 +292,7 @@ inline PackedFunc PackFuncNonBufferArg(F f, const std::vector<TVMType>& arg_type
}

template<typename F>
-inline PackedFunc PackFuncPackedArg(F f, const std::vector<TVMType>& arg_types) {
+inline PackedFunc PackFuncPackedArg(F f, const std::vector<DGLType>& arg_types) {
  std::vector<detail::ArgConvertCode> codes;
  for (size_t i = 0; i < arg_types.size(); ++i) {
    codes.push_back(detail::GetArgConvertCode(arg_types[i]));
@@ -306,5 +306,5 @@ inline PackedFunc PackFuncPackedArg(F f, const std::vector<TVMType>& arg_types)
  }
}
}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
#endif  // DGL_RUNTIME_PACK_ARGS_H_
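
NumBufferArgs depends on the convention, enforced in the elided body, that buffer (kHandle) arguments precede all scalars. A small sketch under that assumption, taking DGLType to carry the code/bits/lanes fields used throughout this header:

// sketch: two leading buffer arguments followed by one float32 scalar
std::vector<DGLType> arg_types(3);
arg_types[0].code = kHandle;  arg_types[0].bits = 64; arg_types[0].lanes = 1;
arg_types[1].code = kHandle;  arg_types[1].bits = 64; arg_types[1].lanes = 1;
arg_types[2].code = kDLFloat; arg_types[2].bits = 32; arg_types[2].lanes = 1;
size_t num_buffer = dgl::runtime::NumBufferArgs(arg_types);  // == 2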
@@ -12,7 +12,7 @@
#include <array>
#include "runtime_base.h"

-namespace tvm {
+namespace dgl {
namespace runtime {

struct Registry::Manager {
@@ -107,10 +107,10 @@ ExtTypeVTable* ExtTypeVTable::RegisterInternal(
  return pvt;
}

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl

/*! \brief entry to easily hold returning information */
-struct TVMFuncThreadLocalEntry {
+struct DGLFuncThreadLocalEntry {
  /*! \brief result holder for returning strings */
  std::vector<std::string> ret_vec_str;
  /*! \brief result holder for returning string pointers */
@@ -118,39 +118,39 @@ struct TVMFuncThreadLocalEntry {
};

/*! \brief Thread local store that can be used to hold return values. */
-typedef dmlc::ThreadLocalStore<TVMFuncThreadLocalEntry> TVMFuncThreadLocalStore;
+typedef dmlc::ThreadLocalStore<DGLFuncThreadLocalEntry> DGLFuncThreadLocalStore;

-int TVMExtTypeFree(void* handle, int type_code) {
+int DGLExtTypeFree(void* handle, int type_code) {
  API_BEGIN();
-  tvm::runtime::ExtTypeVTable::Get(type_code)->destroy(handle);
+  dgl::runtime::ExtTypeVTable::Get(type_code)->destroy(handle);
  API_END();
}

-int TVMFuncRegisterGlobal(
-    const char* name, TVMFunctionHandle f, int override) {
+int DGLFuncRegisterGlobal(
+    const char* name, DGLFunctionHandle f, int override) {
  API_BEGIN();
-  tvm::runtime::Registry::Register(name, override != 0)
-  .set_body(*static_cast<tvm::runtime::PackedFunc*>(f));
+  dgl::runtime::Registry::Register(name, override != 0)
+  .set_body(*static_cast<dgl::runtime::PackedFunc*>(f));
  API_END();
}

-int TVMFuncGetGlobal(const char* name, TVMFunctionHandle* out) {
+int DGLFuncGetGlobal(const char* name, DGLFunctionHandle* out) {
  API_BEGIN();
-  const tvm::runtime::PackedFunc* fp =
-      tvm::runtime::Registry::Get(name);
+  const dgl::runtime::PackedFunc* fp =
+      dgl::runtime::Registry::Get(name);
  if (fp != nullptr) {
-    *out = new tvm::runtime::PackedFunc(*fp);  // NOLINT(*)
+    *out = new dgl::runtime::PackedFunc(*fp);  // NOLINT(*)
  } else {
    *out = nullptr;
  }
  API_END();
}

-int TVMFuncListGlobalNames(int *out_size,
+int DGLFuncListGlobalNames(int *out_size,
                           const char*** out_array) {
  API_BEGIN();
-  TVMFuncThreadLocalEntry *ret = TVMFuncThreadLocalStore::Get();
-  ret->ret_vec_str = tvm::runtime::Registry::ListNames();
+  DGLFuncThreadLocalEntry *ret = DGLFuncThreadLocalStore::Get();
+  ret->ret_vec_str = dgl::runtime::Registry::ListNames();
  ret->ret_vec_charp.clear();
  for (size_t i = 0; i < ret->ret_vec_str.size(); ++i) {
    ret->ret_vec_charp.push_back(ret->ret_vec_str[i].c_str());
......
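
DGLFuncListGlobalNames follows the usual C-API pattern: the strings stay alive in the thread-local entry above, and the caller only receives raw pointers. A usage sketch from the C side:

#include <stdio.h>

// sketch: enumerate every registered global through the renamed C API
void PrintGlobals(void) {
  int out_size = 0;
  const char** out_array = NULL;
  if (DGLFuncListGlobalNames(&out_size, &out_array) == 0) {
    for (int i = 0; i < out_size; ++i) {
      printf("%s\n", out_array[i]);  // valid until the next call on this thread
    }
  }
}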
@@ -13,21 +13,21 @@
#define API_BEGIN() try {
/*! \brief every function starts with API_BEGIN();
  and finishes with API_END() or API_END_HANDLE_ERROR */
-#define API_END() } catch(std::runtime_error &_except_) { return TVMAPIHandleException(_except_); } return 0;  // NOLINT(*)
+#define API_END() } catch(std::runtime_error &_except_) { return DGLAPIHandleException(_except_); } return 0;  // NOLINT(*)
/*!
 * \brief every function starts with API_BEGIN();
 * and finishes with API_END() or API_END_HANDLE_ERROR.
 * The finally clause contains the procedure to clean up state when an error happens.
 */
-#define API_END_HANDLE_ERROR(Finalize) } catch(std::runtime_error &_except_) { Finalize; return TVMAPIHandleException(_except_); } return 0;  // NOLINT(*)
+#define API_END_HANDLE_ERROR(Finalize) } catch(std::runtime_error &_except_) { Finalize; return DGLAPIHandleException(_except_); } return 0;  // NOLINT(*)
/*!
 * \brief handle an exception thrown by an API call
 * \param e the exception
 * \return the return value of the API after the exception is handled
 */
-inline int TVMAPIHandleException(const std::runtime_error &e) {
-  TVMAPISetLastError(e.what());
+inline int DGLAPIHandleException(const std::runtime_error &e) {
+  DGLAPISetLastError(e.what());
  return -1;
}
......
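
Every C entry point in this commit keeps the same shape: API_BEGIN() opens a try block, and API_END() catches std::runtime_error, stores it through DGLAPISetLastError, and returns -1, with 0 meaning success. A minimal sketch of a hypothetical entry point using the macros:

// sketch: the error-handling convention in one function (not in the commit)
int DGLExampleNop(int x) {
  API_BEGIN();
  if (x < 0) {
    throw std::runtime_error("x must be non-negative");
  }
  API_END();  // expands to: catch, set last error, return -1; otherwise return 0
}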
@@ -8,7 +8,7 @@
#include <mutex>
#include "module_util.h"

-namespace tvm {
+namespace dgl {
namespace runtime {

class SystemLibModuleNode : public ModuleNode {
@@ -41,10 +41,10 @@ class SystemLibModuleNode : public ModuleNode {

  void RegisterSymbol(const std::string& name, void* ptr) {
    std::lock_guard<std::mutex> lock(mutex_);
-    if (name == symbol::tvm_module_ctx) {
+    if (name == symbol::dgl_module_ctx) {
      void** ctx_addr = reinterpret_cast<void**>(ptr);
      *ctx_addr = this;
-    } else if (name == symbol::tvm_dev_mblob) {
+    } else if (name == symbol::dgl_dev_mblob) {
      // Record pointer to content of submodules to be loaded.
      // We defer loading submodules to the first call to GetFunction().
      // The reason is that RegisterSymbol() gets called when initializing the
@@ -79,14 +79,14 @@ class SystemLibModuleNode : public ModuleNode {
  void* module_blob_{nullptr};
};

-TVM_REGISTER_GLOBAL("module._GetSystemLib")
-.set_body([](TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("module._GetSystemLib")
+.set_body([](DGLArgs args, DGLRetValue* rv) {
    *rv = runtime::Module(SystemLibModuleNode::Global());
  });
}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl

-int TVMBackendRegisterSystemLibSymbol(const char* name, void* ptr) {
-  tvm::runtime::SystemLibModuleNode::Global()->RegisterSymbol(name, ptr);
+int DGLBackendRegisterSystemLibSymbol(const char* name, void* ptr) {
+  dgl::runtime::SystemLibModuleNode::Global()->RegisterSymbol(name, ptr);
  return 0;
}
@@ -23,7 +23,7 @@
const constexpr int kL1CacheBytes = 64;

-namespace tvm {
+namespace dgl {
namespace runtime {

// stride in the page, fit to cache line.
@@ -35,7 +35,7 @@ constexpr int kSyncStride = 64 / sizeof(std::atomic<int>);
class ParallelLauncher {
 public:
  // Reset the task request.
-  void Init(FTVMParallelLambda flambda,
+  void Init(FDGLParallelLambda flambda,
            void* cdata,
            int num_task,
            bool need_sync) {
@@ -68,7 +68,7 @@ class ParallelLauncher
  // Wait n jobs to finish
  int WaitForJobs() {
    while (num_pending_.load() != 0) {
-      tvm::runtime::threading::Yield();
+      dgl::runtime::threading::Yield();
    }
    if (!has_error_.load()) return 0;
    // the following is intended to use string due to
@@ -80,13 +80,13 @@ class ParallelLauncher
        par_errors_[i].clear();
      }
    }
-    TVMAPISetLastError(err.c_str());
+    DGLAPISetLastError(err.c_str());
    return -1;
  }
  // Signal that one job has finished with an error.
  void SignalJobError(int task_id) {
    num_pending_.fetch_sub(1);
-    par_errors_[task_id] = TVMGetLastError();
+    par_errors_[task_id] = DGLGetLastError();
    has_error_.store(true);
  }
  // Signal that one job has finished.
@@ -98,11 +98,11 @@ class ParallelLauncher
    return dmlc::ThreadLocalStore<ParallelLauncher>::Get();
  }
  // The parallel lambda
-  FTVMParallelLambda flambda;
+  FDGLParallelLambda flambda;
  // The closure data
  void* cdata;
  // Local env
-  TVMParallelGroupEnv env;
+  DGLParallelGroupEnv env;
  // Whether this thread is a worker of the pool.
  // used to prevent recursive launch.
  bool is_worker{false};
@@ -143,7 +143,7 @@ class SpscTaskQueue
   */
  void Push(const Task& input) {
    while (!Enqueue(input)) {
-      tvm::runtime::threading::Yield();
+      dgl::runtime::threading::Yield();
    }
    if (pending_.fetch_add(1) == -1) {
      std::unique_lock<std::mutex> lock(mutex_);
@@ -162,7 +162,7 @@
    // If a new task comes to the queue quickly, this wait keeps the worker from sleeping.
    // The default spin count is set by following the typical omp convention.
    for (uint32_t i = 0; i < spin_count && pending_.load() == 0; ++i) {
-      tvm::runtime::threading::Yield();
+      dgl::runtime::threading::Yield();
    }
    if (pending_.fetch_sub(1) == 0) {
      std::unique_lock<std::mutex> lock(mutex_);
@@ -243,13 +243,13 @@ class SpscTaskQueue

// The thread pool
class ThreadPool {
 public:
-  ThreadPool(): num_workers_(tvm::runtime::threading::MaxConcurrency()) {
+  ThreadPool(): num_workers_(dgl::runtime::threading::MaxConcurrency()) {
    for (int i = 0; i < num_workers_; ++i) {
      // The SpscTaskQueue only hosts ONE item at a time
      queues_.emplace_back(std::unique_ptr<SpscTaskQueue>(new SpscTaskQueue()));
    }
-    threads_ = std::unique_ptr<tvm::runtime::threading::ThreadGroup>(
-        new tvm::runtime::threading::ThreadGroup(
+    threads_ = std::unique_ptr<dgl::runtime::threading::ThreadGroup>(
+        new dgl::runtime::threading::ThreadGroup(
            num_workers_, [this](int worker_id) { this->RunWorker(worker_id); },
            exclude_worker0_ /* include_main_thread */));
    num_workers_used_ = threads_->Configure(threading::ThreadGroup::kBig, 0, exclude_worker0_);
@@ -260,7 +260,7 @@ class ThreadPool
    }
    threads_.reset();
  }
-  int Launch(FTVMParallelLambda flambda,
+  int Launch(FDGLParallelLambda flambda,
             void* cdata,
             int num_task,
             int need_sync) {
@@ -285,7 +285,7 @@
    }
    // use the master thread to run task 0
    if (exclude_worker0_) {
-      TVMParallelGroupEnv* penv = &(tsk.launcher->env);
+      DGLParallelGroupEnv* penv = &(tsk.launcher->env);
      if ((*tsk.launcher->flambda)(0, penv, cdata) == 0) {
        tsk.launcher->SignalJobFinish();
      } else {
@@ -318,7 +318,7 @@
    ParallelLauncher::ThreadLocal()->is_worker = true;
    while (queue->Pop(&task)) {
      CHECK(task.launcher != nullptr);
-      TVMParallelGroupEnv* penv = &(task.launcher->env);
+      DGLParallelGroupEnv* penv = &(task.launcher->env);
      void* cdata = task.launcher->cdata;
      if ((*task.launcher->flambda)(task.task_id, penv, cdata) == 0) {
        task.launcher->SignalJobFinish();
@@ -337,11 +337,11 @@
  bool exclude_worker0_{false};
#endif
  std::vector<std::unique_ptr<SpscTaskQueue> > queues_;
-  std::unique_ptr<tvm::runtime::threading::ThreadGroup> threads_;
+  std::unique_ptr<dgl::runtime::threading::ThreadGroup> threads_;
};

-TVM_REGISTER_GLOBAL("runtime.config_threadpool")
-.set_body([](TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("runtime.config_threadpool")
+.set_body([](DGLArgs args, DGLRetValue* rv) {
    threading::ThreadGroup::AffinityMode mode =\
        static_cast<threading::ThreadGroup::AffinityMode>(\
            static_cast<int>(args[0]));
@@ -351,20 +351,20 @@ TVM_REGISTER_GLOBAL("runtime.config_threadpool")

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl

-int TVMBackendParallelLaunch(
-    FTVMParallelLambda flambda,
+int DGLBackendParallelLaunch(
+    FDGLParallelLambda flambda,
    void* cdata,
    int num_task) {
-  int res = tvm::runtime::ThreadPool::ThreadLocal()->Launch(
+  int res = dgl::runtime::ThreadPool::ThreadLocal()->Launch(
      flambda, cdata, num_task, 1);
  return res;
}

-int TVMBackendParallelBarrier(int task_id, TVMParallelGroupEnv* penv) {
-  using tvm::runtime::kSyncStride;
+int DGLBackendParallelBarrier(int task_id, DGLParallelGroupEnv* penv) {
+  using dgl::runtime::kSyncStride;
  int num_task = penv->num_task;
  std::atomic<int>* sync_counter =
      reinterpret_cast<std::atomic<int>*>(penv->sync_handle);
@@ -374,7 +374,7 @@ int TVMBackendParallelBarrier(int task_id, TVMParallelGroupEnv* penv) {
    if (i != task_id) {
      while (sync_counter[i * kSyncStride].load(
          std::memory_order_relaxed) <= old_counter) {
-        tvm::runtime::threading::Yield();
+        dgl::runtime::threading::Yield();
      }
    }
  }
......
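
DGLBackendParallelLaunch hands a C-style lambda to the thread pool; each task receives its task_id plus the shared DGLParallelGroupEnv. A hedged sketch, assuming FDGLParallelLambda keeps the (task_id, penv, cdata) -> int signature used above and that at most 64 tasks run:

// sketch: parallel partial sums through the renamed backend entry point
struct SumJob {
  const float* data;
  int64_t n;
  double partial[64];  // one slot per task, assumed upper bound
};

static int SumLambda(int task_id, DGLParallelGroupEnv* penv, void* cdata) {
  SumJob* job = static_cast<SumJob*>(cdata);
  const int num_task = penv->num_task;
  const int64_t begin = job->n * task_id / num_task;
  const int64_t end = job->n * (task_id + 1) / num_task;
  double s = 0.0;
  for (int64_t i = begin; i < end; ++i) s += job->data[i];
  job->partial[task_id] = s;
  return 0;  // a non-zero return marks the task as failed
}

// launch with four tasks: DGLBackendParallelLaunch(SumLambda, &job, 4);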
/*!
 * Copyright (c) 2017 by Contributors
 * \file thread_storage_scope.h
- * \brief Extract thread axis configuration from TVMArgs.
+ * \brief Extract thread axis configuration from DGLArgs.
 */
#ifndef DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
#define DGL_RUNTIME_THREAD_STORAGE_SCOPE_H_
@@ -10,7 +10,7 @@
#include <string>
#include <vector>

-namespace tvm {
+namespace dgl {
namespace runtime {

/*!
@@ -170,7 +170,7 @@ class ThreadAxisConfig {
    }
  }
  // extract workload from arguments.
-  ThreadWorkLoad Extract(TVMArgs x) const {
+  ThreadWorkLoad Extract(DGLArgs x) const {
    ThreadWorkLoad w;
    std::fill(w.work_size, w.work_size + 6, 1);
    for (size_t i = 0; i < arg_index_map_.size(); ++i) {
@@ -194,12 +12,12 @@ class ThreadAxisConfig {
};

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl

namespace std {
template <>
-struct hash<::tvm::runtime::StorageScope> {
-  std::size_t operator()(const ::tvm::runtime::StorageScope& k) const {
+struct hash<::dgl::runtime::StorageScope> {
+  std::size_t operator()(const ::dgl::runtime::StorageScope& k) const {
    return static_cast<size_t>(k.rank);
  }
};
......
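
The std::hash specialization above keys a StorageScope by its rank alone; assuming StorageScope also defines operator== in the elided part of this header, it can serve directly as an unordered-container key:

// sketch: StorageScope as a key, resolved through the hash specialization above
std::unordered_map<dgl::runtime::StorageScope, int> scope_counts;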
@@ -15,7 +15,7 @@
#include <sched.h>
#endif

-namespace tvm {
+namespace dgl {
namespace runtime {
namespace threading {
@@ -60,7 +60,7 @@ class ThreadGroup::Impl {
    // ones.
    num_workers_used = std::min(num_workers_, num_workers_used);

-    const char *val = getenv("TVM_BIND_THREADS");
+    const char *val = getenv("DGL_BIND_THREADS");
    if (val == nullptr || atoi(val) == 1) {
      // Do not set affinity if there are more workers than found cores
      if (sorted_order_.size() >= static_cast<unsigned int>(num_workers_)) {
@@ -197,7 +197,7 @@ void Yield() {

int MaxConcurrency() {
  int max_concurrency = 1;
-  const char *val = getenv("TVM_NUM_THREADS");
+  const char *val = getenv("DGL_NUM_THREADS");
  if (val == nullptr) {
    val = getenv("OMP_NUM_THREADS");
  }
@@ -215,4 +215,4 @@
}  // namespace threading
}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
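
MaxConcurrency (partly elided above) resolves the worker count from DGL_NUM_THREADS first, then OMP_NUM_THREADS, then the hardware. A sketch of that fallback order as described; the clamping in the real function may differ:

#include <cstdlib>
#include <thread>

// sketch: the env-var fallback chain for choosing the thread count
int GuessMaxConcurrency() {
  const char* val = std::getenv("DGL_NUM_THREADS");
  if (val == nullptr) val = std::getenv("OMP_NUM_THREADS");
  int n = (val != nullptr) ? std::atoi(val) : 0;
  if (n < 1) n = static_cast<int>(std::thread::hardware_concurrency());
  return n < 1 ? 1 : n;
}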
@@ -5,7 +5,7 @@
 */
#include "workspace_pool.h"

-namespace tvm {
+namespace dgl {
namespace runtime {

// page size.
@@ -23,12 +23,12 @@ class WorkspacePool::Pool {
    allocated_.push_back(e);
  }
  // allocate from pool
-  void* Alloc(TVMContext ctx, DeviceAPI* device, size_t nbytes) {
+  void* Alloc(DGLContext ctx, DeviceAPI* device, size_t nbytes) {
    // Allocate align to page.
    nbytes = (nbytes + (kWorkspacePageSize - 1)) / kWorkspacePageSize * kWorkspacePageSize;
    if (nbytes == 0) nbytes = kWorkspacePageSize;
    Entry e;
-    TVMType type;
+    DGLType type;
    type.code = kDLUInt;
    type.bits = 8;
    type.lanes = 1;
@@ -92,7 +92,7 @@ class WorkspacePool::Pool {
    }
  }
  // Release all resources
-  void Release(TVMContext ctx, DeviceAPI* device) {
+  void Release(DGLContext ctx, DeviceAPI* device) {
    CHECK_EQ(allocated_.size(), 1);
    for (size_t i = 1; i < free_list_.size(); ++i) {
      device->FreeDataSpace(ctx, free_list_[i].data);
@@ -119,7 +119,7 @@ WorkspacePool::WorkspacePool(DLDeviceType device_type, std::shared_ptr<DeviceAPI

WorkspacePool::~WorkspacePool() {
  for (size_t i = 0; i < array_.size(); ++i) {
    if (array_[i] != nullptr) {
-      TVMContext ctx;
+      DGLContext ctx;
      ctx.device_type = device_type_;
      ctx.device_id = static_cast<int>(i);
      array_[i]->Release(ctx, device_.get());
@@ -128,7 +128,7 @@ WorkspacePool::~WorkspacePool() {
  }
}

-void* WorkspacePool::AllocWorkspace(TVMContext ctx, size_t size) {
+void* WorkspacePool::AllocWorkspace(DGLContext ctx, size_t size) {
  if (static_cast<size_t>(ctx.device_id) >= array_.size()) {
    array_.resize(ctx.device_id + 1, nullptr);
  }
@@ -138,11 +138,11 @@ void* WorkspacePool::AllocWorkspace(TVMContext ctx, size_t size) {
  return array_[ctx.device_id]->Alloc(ctx, device_.get(), size);
}

-void WorkspacePool::FreeWorkspace(TVMContext ctx, void* ptr) {
+void WorkspacePool::FreeWorkspace(DGLContext ctx, void* ptr) {
  CHECK(static_cast<size_t>(ctx.device_id) < array_.size() &&
        array_[ctx.device_id] != nullptr);
  array_[ctx.device_id]->Free(ptr);
}

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
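
WorkspacePool rounds every request up to a whole page and keeps freed pages on a free list, so repeated temporaries on one device avoid round trips to the driver. A hedged usage sketch, assuming `pool` is a WorkspacePool* built as in the elided constructor:

// sketch: page-aligned scratch allocation through the pool
DGLContext ctx;
ctx.device_type = kDLCPU;
ctx.device_id = 0;
void* scratch = pool->AllocWorkspace(ctx, 1000);  // rounded up to one page
/* ... use scratch ... */
pool->FreeWorkspace(ctx, scratch);  // back to the free list, not the device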
@@ -9,7 +9,7 @@
#include <dgl/runtime/device_api.h>
#include <vector>

-namespace tvm {
+namespace dgl {
namespace runtime {
/*!
 * \brief A workspace pool to manage
@@ -37,14 +37,14 @@ class WorkspacePool {
   * \param ctx The context of allocation.
   * \param size The size to be allocated.
   */
-  void* AllocWorkspace(TVMContext ctx, size_t size);
+  void* AllocWorkspace(DGLContext ctx, size_t size);
  /*!
   * \brief Free temporal workspace in backend execution.
   *
   * \param ctx The context of allocation.
   * \param ptr The pointer to be freed.
   */
-  void FreeWorkspace(TVMContext ctx, void* ptr);
+  void FreeWorkspace(DGLContext ctx, void* ptr);

 private:
  class Pool;
@@ -57,5 +57,5 @@
};

}  // namespace runtime
-}  // namespace tvm
+}  // namespace dgl
#endif  // DGL_RUNTIME_WORKSPACE_POOL_H_
@@ -7,14 +7,14 @@
#include <dgl/scheduler.h>
#include "../c_api_common.h"

-using tvm::runtime::TVMArgs;
-using tvm::runtime::TVMRetValue;
-using tvm::runtime::NDArray;
+using dgl::runtime::DGLArgs;
+using dgl::runtime::DGLRetValue;
+using dgl::runtime::NDArray;

namespace dgl {

-TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketing")
-.set_body([] (TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketing")
+.set_body([] (DGLArgs args, DGLRetValue* rv) {
    const IdArray msg_ids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[0]));
    const IdArray vids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[1]));
    const IdArray nids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[2]));
@@ -22,8 +22,8 @@ TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketing")
    *rv = ConvertNDArrayVectorToPackedFunc(sched::DegreeBucketing(msg_ids, vids, nids));
  });

-TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForEdges")
-.set_body([] (TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForEdges")
+.set_body([] (DGLArgs args, DGLRetValue* rv) {
    const IdArray vids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[0]));
    // XXX: better way to do arange?
    int64_t n_msgs = vids->shape[0];
@@ -35,8 +35,8 @@ TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForEdges")
    *rv = ConvertNDArrayVectorToPackedFunc(sched::DegreeBucketing(msg_ids, vids, vids));
  });

-TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForRecvNodes")
-.set_body([] (TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForRecvNodes")
+.set_body([] (DGLArgs args, DGLRetValue* rv) {
    GraphHandle ghandle = args[0];
    const Graph* gptr = static_cast<Graph*>(ghandle);
    const IdArray vids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[1]));
@@ -44,8 +44,8 @@ TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForRecvNod
    *rv = ConvertNDArrayVectorToPackedFunc(sched::DegreeBucketing(edges.id, edges.dst, vids));
  });

-TVM_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForFullGraph")
-.set_body([] (TVMArgs args, TVMRetValue* rv) {
+DGL_REGISTER_GLOBAL("runtime.degree_bucketing._CAPI_DGLDegreeBucketingForFullGraph")
+.set_body([] (DGLArgs args, DGLRetValue* rv) {
    GraphHandle ghandle = args[0];
    const Graph* gptr = static_cast<Graph*>(ghandle);
    const auto& edges = gptr->Edges(false);
......
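
These scheduler globals are normally reached from Python through the renamed FFI, but the same call works from C++ via the registry. A sketch, assuming msg_ids, vids, and nids are DLPack-compatible tensors packed the way the lambdas above unpack them:

// sketch: invoking degree bucketing through the registry (argument packing simplified)
const dgl::runtime::PackedFunc* f = dgl::runtime::Registry::Get(
    "runtime.degree_bucketing._CAPI_DGLDegreeBucketing");
CHECK(f != nullptr) << "global not registered";
dgl::runtime::PackedFunc result = (*f)(msg_ids, vids, nids);  // packed NDArray vector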