Unverified commit 21c4c29a authored by czkkkkkk, committed by GitHub

[Sparse] Reduce peak memory of SpSpAdd. (#5143)

* [Sparse] Reduce peak memory usage of SpSpAdd

* Update

* Update
parent b743f767
@@ -22,9 +22,15 @@ c10::intrusive_ptr<SparseMatrix> SpSpAdd(
     const c10::intrusive_ptr<SparseMatrix>& A,
     const c10::intrusive_ptr<SparseMatrix>& B) {
   ElementwiseOpSanityCheck(A, B);
-  auto torch_A = COOToTorchCOO(A->COOPtr(), A->value());
-  auto torch_B = COOToTorchCOO(B->COOPtr(), B->value());
-  auto sum = (torch_A + torch_B).coalesce();
+  torch::Tensor sum;
+  {
+    // TODO(#5145) This is a workaround to reduce peak memory usage. It is no
+    // longer needed after we address #5145.
+    auto torch_A = COOToTorchCOO(A->COOPtr(), A->value());
+    auto torch_B = COOToTorchCOO(B->COOPtr(), B->value());
+    sum = torch_A + torch_B;
+  }
+  sum = sum.coalesce();
   auto indices = sum.indices();
   auto row = indices[0];
   auto col = indices[1];
...
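For context, the change lowers peak memory by restricting the lifetime of the two intermediate torch COO tensors: sparse addition in PyTorch returns an uncoalesced result, and the original code kept torch_A, torch_B, and the uncoalesced sum alive at the moment coalesce() allocated the merged indices and values. Below is a minimal sketch of the same scoping pattern in plain libtorch; it is not the DGL implementation, and the shapes and values are made up for illustration (COOToTorchCOO and SparseMatrix are not reproduced).

#include <iostream>
#include <torch/torch.h>

int main() {
  torch::Tensor sum;
  {
    // Two sparse operands with the same sparsity pattern (illustrative only).
    auto indices = torch::tensor({{0, 1}, {1, 0}}, torch::kLong);
    auto A = torch::sparse_coo_tensor(indices, torch::tensor({1.0, 2.0}));
    auto B = torch::sparse_coo_tensor(indices, torch::tensor({3.0, 4.0}));
    // Sparse addition produces an uncoalesced tensor; nothing is merged yet.
    sum = A + B;
  }  // A and B go out of scope here, so their storage is released.
  // coalesce() merges duplicate indices into freshly allocated tensors; by
  // this point the operands no longer contribute to the memory peak.
  sum = sum.coalesce();
  std::cout << sum << std::endl;
  return 0;
}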