Commit 5549c70d authored by Lingfan Yu, committed by Minjie Wang

[BugFix] Fix memory leak in DGL edge softmax module (#643)

* fix gat memory increase bug

* work around mxnet memory leak bug

* comments

* poke linter

* lint

* lint
parent ebbb6296
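The MXNet hunks below all apply one pattern: a custom `mx.autograd.Function` keeps whatever was passed to `save_for_backward` referenced on the instance, so the backward pass now drops that reference as soon as it has been used. Here is a minimal sketch of the pattern with a toy `Square` op (not part of DGL, purely illustrative):

```python
import mxnet as mx
from mxnet import nd

class Square(mx.autograd.Function):
    """Toy custom op: y = x * x."""

    def forward(self, x):
        # Stash x for the backward pass; the reference lives on this
        # Function instance until the instance itself is collected.
        self.save_for_backward(x)
        return x * x

    def backward(self, grad_out):
        x, = self.saved_tensors
        grad_in = 2 * x * grad_out
        # Clear the stash explicitly so the saved array can be freed
        # right after backward instead of lingering on the instance.
        self.saved_tensors = None
        return grad_in

x = nd.ones((4,))
x.attach_grad()
with mx.autograd.record():
    y = Square()(x)
y.backward()
print(x.grad)  # [2. 2. 2. 2.]
```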
@@ -310,6 +310,8 @@ class BinaryReduce(mx.autograd.Function):
             zerocopy_to_dgl_ndarray_for_write(grad_rhs), self.lhs_map[1],
             self.rhs_map[1], self.out_map[1])
         grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
+        # clear saved tensors explicitly
+        self.saved_tensors = None
         return grad_lhs, grad_rhs
@@ -351,6 +353,8 @@ class CopyReduce(mx.autograd.Function):
             self.reducer, self.graph, self.target, in_data_nd, out_data_nd,
             grad_out_nd, zerocopy_to_dgl_ndarray_for_write(grad_in),
             self.in_map[1], self.out_map[1])
+        # clear saved tensors explicitly
+        self.saved_tensors = None
         return grad_in
@@ -65,7 +65,9 @@ class EdgeSoftmax(mx.autograd.Function):
             return grad_score.data
         """
         g = self.g
-        out = self.saved_tensors[0]
+        out, = self.saved_tensors  # pylint: disable=access-member-before-definition, unpacking-non-sequence
+        # clear saved tensors explicitly
+        self.saved_tensors = None
         out_name = utils.get_edata_name(g, 'out')
         accum_name = utils.get_ndata_name(g, 'accum')
         grad_score_name = utils.get_edata_name(g, 'grad_score')
@@ -62,6 +62,8 @@ class EdgeSoftmax(th.autograd.Function):
             return grad_score.data
         """
         g, out = ctx.backward_cache
+        # clear backward cache explicitly
+        ctx.backward_cache = None
         out_name = utils.get_edata_name(g, 'out')
         accum_name = utils.get_ndata_name(g, 'accum')
         grad_score_name = utils.get_edata_name(g, 'grad_score')
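The PyTorch version keeps its state on a custom `ctx.backward_cache` attribute rather than `ctx.save_for_backward`, and the fix clears that attribute the same way once backward has read it. A minimal sketch of the same idea, using a made-up `MaskedScale` op rather than DGL's `EdgeSoftmax`:

```python
import torch as th

class MaskedScale(th.autograd.Function):
    """Toy op: multiply a score tensor by a fixed mask."""

    @staticmethod
    def forward(ctx, score, mask):
        # Mimic DGL's pattern of stashing state on a custom ctx
        # attribute instead of ctx.save_for_backward.
        ctx.backward_cache = mask
        return score * mask

    @staticmethod
    def backward(ctx, grad_out):
        mask = ctx.backward_cache
        # Drop the reference once it has been read, mirroring the
        # change above, so the cached object does not outlive backward.
        ctx.backward_cache = None
        return grad_out * mask, None

score = th.randn(5, requires_grad=True)
mask = th.tensor([1., 0., 1., 1., 0.])
out = MaskedScale.apply(score, mask)
out.sum().backward()
print(score.grad)  # equal to mask
```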