Unverified Commit 2f41fcd9 authored by Quan (Andy) Gan, committed by GitHub

[Build] fix various build problems (#3117)

parent bb89dee7
@@ -542,8 +542,9 @@ DGL_DLL int DGLStreamStreamSynchronize(int device_type,
 /*!
  * \brief Load tensor adapter.
+ * \return 0 when success, -1 when failure happens.
  */
-DGL_DLL void DGLLoadTensorAdapter(const char *path);
+DGL_DLL int DGLLoadTensorAdapter(const char *path);

 /*!
  * \brief Bug report macro.
...
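Note on the API change above: returning an `int` status lets C callers detect a failed adapter load instead of proceeding blindly. A minimal caller sketch, assuming the documented 0/-1 convention; the adapter filename here is hypothetical (real paths are composed on the Python side):

```cpp
#include <cstdio>

// Declaration as in the header above (DGL_DLL export macro omitted).
extern "C" int DGLLoadTensorAdapter(const char *path);

int main() {
  // Hypothetical adapter path, for illustration only.
  const char *path = "libtensoradapter_pytorch_1.9.0.so";
  if (DGLLoadTensorAdapter(path) != 0) {
    // -1: adapter not loaded; operators fall back to DGL's own kernels.
    std::fprintf(stderr, "tensor adapter not loaded, using fallback\n");
    return 1;
  }
  return 0;
}
```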
@@ -65,7 +65,7 @@ class TensorDispatcher {
   }

   /*! \brief Load symbols from the given tensor adapter library path */
-  void Load(const char *path_cstr);
+  bool Load(const char *path_cstr);

   /*!
    * \brief Allocate an empty tensor.
...
@@ -112,6 +112,7 @@ def decorate(func, fwrapped):
     import decorator
     return decorator.decorate(func, fwrapped)

+tensor_adapter_loaded = False

 def load_tensor_adapter(backend, version):
     """Tell DGL to load a tensoradapter library for given backend and version.
@@ -123,6 +124,7 @@ def load_tensor_adapter(backend, version):
     version : str
         The version number of the backend.
     """
+    global tensor_adapter_loaded
     version = version.split('+')[0]
     if sys.platform.startswith('linux'):
         basename = 'libtensoradapter_%s_%s.so' % (backend, version)
@@ -133,4 +135,4 @@ def load_tensor_adapter(backend, version):
     else:
         raise NotImplementedError('Unsupported system: %s' % sys.platform)
     path = os.path.join(_DIR_NAME, 'tensoradapter', backend, basename)
-    _LIB.DGLLoadTensorAdapter(path.encode('utf-8'))
+    tensor_adapter_loaded = (_LIB.DGLLoadTensorAdapter(path.encode('utf-8')) == 0)
...
@@ -526,7 +526,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
         (CUDART_VERSION < 11000) &&
         ((op == "copy_lhs" && cusparse_available<bits, IdType>()) ||
          (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>()));
-#if CUDART_VERSION < 11000
     // Create temporary output buffer to store non-transposed output
     if (use_legacy_cusparsemm) {
       for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) {
@@ -539,7 +538,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
         trans_out[ntype] = out;
       }
     }
-#endif
     // Check shape of ufeat for all relation type and compute feature size
     int64_t x_length = 1;
@@ -634,7 +632,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
       }
     }
-#if CUDART_VERSION < 11000
     if (use_legacy_cusparsemm) {
       // transpose output
       for (dgl_type_t ntype = 0; ntype < vec_out.size(); ++ntype) {
@@ -646,7 +643,6 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce,
         device->FreeWorkspace(vec_csr[0].indptr->ctx, trans_out[ntype]);
       }
     }
-#endif
   });
}
...
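In the hunk above, both `#if CUDART_VERSION < 11000` / `#endif` pairs around the legacy-cuSPARSE blocks are removed: the compile-time version test is already folded into the `use_legacy_cusparsemm` boolean, so on CUDA 11+ the guarded code is never executed and the preprocessor guard is redundant. A minimal sketch of that pattern, with hypothetical names (`Compute`, `cusparse_ok`); only `CUDART_VERSION` comes from the CUDA headers:

```cpp
// Stand-in so the sketch compiles without the CUDA toolkit; in real code
// CUDART_VERSION is defined by cuda_runtime.h.
#ifndef CUDART_VERSION
#define CUDART_VERSION 11020
#endif

void Compute(bool cusparse_ok) {
  // The compile-time check is folded into an ordinary boolean...
  const bool use_legacy = (CUDART_VERSION < 11000) && cusparse_ok;
  if (use_legacy) {
    // ...so this branch is statically unreachable on CUDA >= 11.0 and
    // a separate #if/#endif guard around it buys nothing.
  }
}
```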
@@ -379,8 +379,8 @@ int DGLCbArgToReturn(DGLValue* value, int code) {
   API_END();
 }

-void DGLLoadTensorAdapter(const char *path) {
-  TensorDispatcher::Global()->Load(path);
+int DGLLoadTensorAdapter(const char *path) {
+  return TensorDispatcher::Global()->Load(path) ? 0 : -1;
 }

 // set device api
...
@@ -19,18 +19,18 @@ namespace runtime {
 constexpr const char *TensorDispatcher::names_[];

-void TensorDispatcher::Load(const char *path) {
+bool TensorDispatcher::Load(const char *path) {
   CHECK(!available_) << "The tensor adapter can only load once.";

   if (path == nullptr || strlen(path) == 0)
     // does not have dispatcher library; all operators fall back to DGL's implementation
-    return;
+    return false;

 #if defined(WIN32) || defined(_WIN32)
   handle_ = LoadLibrary(path);

   if (!handle_)
-    return;
+    return false;

   for (int i = 0; i < num_entries_; ++i) {
     entrypoints_[i] = reinterpret_cast<void*>(GetProcAddress(handle_, names_[i]));
@@ -40,7 +40,7 @@ void TensorDispatcher::Load(const char *path) {
   handle_ = dlopen(path, RTLD_LAZY);

   if (!handle_)
-    return;
+    return false;

   for (int i = 0; i < num_entries_; ++i) {
     entrypoints_[i] = dlsym(handle_, names_[i]);
@@ -49,6 +49,7 @@ void TensorDispatcher::Load(const char *path) {
 #endif  // WIN32

   available_ = true;
+  return true;
 }

 TensorDispatcher::~TensorDispatcher() {
...
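The `Load` implementation above follows the standard plugin-loading idiom: open the shared library (`LoadLibrary` on Windows, `dlopen` elsewhere), resolve each entry point, and report failure early so callers can fall back. A self-contained, POSIX-only sketch of that idiom; `LoadPlugin` and the symbol name are illustrative, not DGL's actual API (link with `-ldl` on older glibc):

```cpp
#include <dlfcn.h>
#include <cstring>

// Returns true only if both the library and the requested symbol resolve;
// any failure leaves the caller free to fall back to a built-in path.
bool LoadPlugin(const char *path, const char *symbol, void **entrypoint) {
  if (path == nullptr || std::strlen(path) == 0)
    return false;                  // no plugin configured
  void *handle = dlopen(path, RTLD_LAZY);
  if (!handle)
    return false;                  // library missing or not loadable
  *entrypoint = dlsym(handle, symbol);
  return *entrypoint != nullptr;   // symbol lookup must also succeed
}
```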