Unverified commit 00c09b9f authored by Quan (Andy) Gan, committed by GitHub

Revert "[bugfix] Explicitly unpin tensoradapter allocated arrays (#3997)" (#4061)

This reverts commit fdd1fe19.
parent c577dc9f
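
For context, the reverted change (#3997) tagged NDArrays whose storage came from the tensoradapter (i.e. the backend framework's caching allocator) and had the DLPack deleter unpin that storage before handing it back, so that pages pinned in place with dgl.utils.pin_memory_inplace would not linger, still pinned, in the allocator's pool. Below is a rough sketch of that scenario, assuming the PyTorch backend and a CUDA-capable environment (the deleted test further below skips on CPU-only builds for the same reason). The DGL calls are taken from that test; the alloc_through_tensoradapter helper is purely illustrative, and whether the reused buffer actually shows up as pinned depends on the allocator's pooling behavior.

import torch
import torch.utils.dlpack

import dgl


def alloc_through_tensoradapter():
    # dgl.ndarray.empty goes through the TensorDispatcher when the backend
    # tensoradapter is available, so the buffer comes from PyTorch's
    # allocator rather than DGL's own CPU allocator.
    arr = dgl.ndarray.empty(
        [10000, 10], 'float32',
        ctx=dgl.utils.to_dgl_context(torch.device('cpu')))
    return torch.utils.dlpack.from_dlpack(arr.to_dlpack())


t = alloc_through_tensoradapter()
dgl.utils.pin_memory_inplace(t)   # registers the underlying pages as pinned
assert t.is_pinned()

# Dropping the tensor runs DGL's DLPackDeleter and returns the buffer to the
# backend allocator. With the reverted #3997 the deleter unpinned the buffer
# first; after this revert the pages may still be registered when the
# allocator hands the same buffer out again.
del t
u = alloc_through_tensoradapter()
print(u.is_pinned())   # may be True if a still-pinned pooled buffer is reused
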
@@ -415,8 +415,6 @@ struct NDArray::Container {
   std::vector<int64_t> stride_;
   /*! \brief The internal array object */
   std::atomic<int> ref_counter_{0};
-  bool from_tensor_dispatcher_{false};
 };
 // implementations of inline functions
...
@@ -78,12 +78,6 @@ struct NDArray::Internal {
   // This enables us to create NDArray from memory allocated by other
   // frameworks that are DLPack compatible
   static void DLPackDeleter(NDArray::Container* ptr) {
-    if (ptr->from_tensor_dispatcher_) {
-      if (IsDataPinned(&(ptr->dl_tensor))) {
-        UnpinData(&(ptr->dl_tensor));
-      }
-    }
     DLManagedTensor* tensor = static_cast<DLManagedTensor*>(ptr->manager_ctx);
     if (tensor->deleter != nullptr) {
       (*tensor->deleter)(tensor);
@@ -216,11 +210,8 @@ NDArray NDArray::Empty(std::vector<int64_t> shape,
                        DLDataType dtype,
                        DLContext ctx) {
   TensorDispatcher* td = TensorDispatcher::Global();
-  if (td->IsAvailable()) {
-    auto nd = td->Empty(shape, dtype, ctx);
-    nd.data_->from_tensor_dispatcher_ = true;
-    return nd;
-  }
+  if (td->IsAvailable())
+    return td->Empty(shape, dtype, ctx);
   NDArray ret = Internal::Create(shape, dtype, ctx);
   // setup memory content
...
import backend as F
import dgl
import gc
import unittest
import torch


@unittest.skipIf(F.ctx().type == 'cpu', reason='Pinning memory tests require GPU.')
def test_unpin_tensoradapter():
    # run a sufficient number of iterations such that the memory pool should be
    # re-used
    for j in range(3):
        t = F.zerocopy_from_dlpack(dgl.ndarray.empty(
            [10000, 10],
            F.reverse_data_type_dict[F.float32],
            ctx=dgl.utils.to_dgl_context(torch.device('cpu'))).to_dlpack()).zero_()
        assert not F.is_pinned(t)
        dgl.utils.pin_memory_inplace(t)
        assert F.is_pinned(t)
        del t
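
The deleted test above exercised the reverted code path: it allocated through the tensoradapter, pinned the buffer in place, and relied on the deleter hook to unpin it on del t, so that the assert not F.is_pinned(t) at the top of the next iteration would presumably still hold if the memory pool reused the buffer. With the hook reverted, the test is removed together with it.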