Unverified Commit cf8fea73 authored by Muhammed Fatih BALIN, committed by GitHub

[GraphBolt][CUDA] Hot fix after inplace pinning PR (#6974)

parent aad12df6
@@ -34,16 +34,17 @@ class FusedCSCSamplingGraph(SamplingGraph):
    ):
        super().__init__()
        self._c_csc_graph = c_csc_graph
        self._is_inplace_pinned = set()

    def __del__(self):
        # torch.Tensor.pin_memory() is not an inplace operation. To make it
        # truly in-place, we need to use cudaHostRegister. Then, we need to use
        # cudaHostUnregister to unpin the tensor in the destructor.
        # https://github.com/pytorch/pytorch/issues/32167#issuecomment-753551842
        if hasattr(self, "_is_inplace_pinned"):
            for tensor in self._is_inplace_pinned:
                assert (
                    torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr())
                    == 0
                )

    @property
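The `hasattr` guard in `__del__` matters because a destructor can run on an instance whose `__init__` never executed, for example an object materialized through `__new__` during unpickling; touching `self._is_inplace_pinned` unconditionally there would raise `AttributeError` inside `__del__`. Below is a minimal, hypothetical sketch of that pattern; the `Holder` class and its usage are illustrative stand-ins, not part of the library:

```python
import torch


class Holder:
    """Hypothetical stand-in for an object tracking in-place pinned tensors."""

    def __init__(self):
        self._is_inplace_pinned = set()

    def __del__(self):
        # __del__ may run on an instance whose __init__ never executed
        # (e.g. one created via __new__ by pickle), so the attribute is
        # not guaranteed to exist; guard before iterating.
        if hasattr(self, "_is_inplace_pinned"):
            for tensor in self._is_inplace_pinned:
                assert (
                    torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr())
                    == 0
                )


# An instance created without running __init__ is now destroyed without error.
obj = Holder.__new__(Holder)
del obj
```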
@@ -990,6 +991,8 @@ class FusedCSCSamplingGraph(SamplingGraph):
        # cudaHostUnregister to unpin the tensor in the destructor.
        # https://github.com/pytorch/pytorch/issues/32167#issuecomment-753551842
        cudart = torch.cuda.cudart()
        if not hasattr(self, "_is_inplace_pinned"):
            self._is_inplace_pinned = set()

        def _pin(x):
            if hasattr(x, "pin_memory_"):
                ...
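The second hunk initializes `_is_inplace_pinned` lazily inside the pinning path, so the set exists even for objects that did not go through this `__init__`. For context, here is a rough, self-contained sketch of the `cudaHostRegister`-based in-place pinning that this bookkeeping supports; the function names, the registry, and the example tensor are assumptions for illustration, not the library's API:

```python
import torch


def pin_inplace(tensor, registry):
    # Register the tensor's existing CPU storage as page-locked memory
    # instead of copying it into a new pinned buffer, then remember the
    # tensor so it can be unregistered later.
    cudart = torch.cuda.cudart()
    assert (
        cudart.cudaHostRegister(
            tensor.data_ptr(), tensor.numel() * tensor.element_size(), 0
        )
        == 0
    )
    registry.add(tensor)


def unpin_inplace(tensor, registry):
    # Undo cudaHostRegister before the tensor's memory is freed.
    assert torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr()) == 0
    registry.discard(tensor)


if torch.cuda.is_available():
    registry = set()
    x = torch.arange(8, dtype=torch.float32)
    pin_inplace(x, registry)
    assert x.is_pinned()  # same storage, now page-locked
    unpin_inplace(x, registry)
```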