Unverified commit 9169fc35, authored by Mufei Li, committed by GitHub
Browse files

[Sparse] Polish sparse matrix creation functions (#5093)



* update

* Update

* Update

* Update

* Update
Co-authored-by: Ubuntu <ubuntu@ip-172-31-36-188.ap-northeast-1.compute.internal>
parent 0698e91a
...@@ -49,7 +49,7 @@ class SparseMatrix : public torch::CustomClassHolder { ...@@ -49,7 +49,7 @@ class SparseMatrix : public torch::CustomClassHolder {
* *
* @return SparseMatrix * @return SparseMatrix
*/ */
static c10::intrusive_ptr<SparseMatrix> FromCOO( static c10::intrusive_ptr<SparseMatrix> FromCOOPointer(
const std::shared_ptr<COO>& coo, torch::Tensor value, const std::shared_ptr<COO>& coo, torch::Tensor value,
const std::vector<int64_t>& shape); const std::vector<int64_t>& shape);
...@@ -61,7 +61,7 @@ class SparseMatrix : public torch::CustomClassHolder { ...@@ -61,7 +61,7 @@ class SparseMatrix : public torch::CustomClassHolder {
* *
* @return SparseMatrix * @return SparseMatrix
*/ */
static c10::intrusive_ptr<SparseMatrix> FromCSR( static c10::intrusive_ptr<SparseMatrix> FromCSRPointer(
const std::shared_ptr<CSR>& csr, torch::Tensor value, const std::shared_ptr<CSR>& csr, torch::Tensor value,
const std::vector<int64_t>& shape); const std::vector<int64_t>& shape);
...@@ -73,10 +73,59 @@ class SparseMatrix : public torch::CustomClassHolder { ...@@ -73,10 +73,59 @@ class SparseMatrix : public torch::CustomClassHolder {
* *
* @return SparseMatrix * @return SparseMatrix
*/ */
static c10::intrusive_ptr<SparseMatrix> FromCSC( static c10::intrusive_ptr<SparseMatrix> FromCSCPointer(
const std::shared_ptr<CSR>& csc, torch::Tensor value, const std::shared_ptr<CSR>& csc, torch::Tensor value,
const std::vector<int64_t>& shape); const std::vector<int64_t>& shape);
/**
 * @brief Create a SparseMatrix from tensors in COO format.
 * @param row Row indices of the COO.
 * @param col Column indices of the COO.
 * @param value Values of the sparse matrix.
 * @param shape Shape of the sparse matrix.
 *
 * @note row and col are presumably 1-D index tensors of equal length —
 * TODO confirm against the COO struct's contract.
 *
 * @return SparseMatrix
 */
static c10::intrusive_ptr<SparseMatrix> FromCOO(
    torch::Tensor row, torch::Tensor col, torch::Tensor value,
    const std::vector<int64_t>& shape);
/**
 * @brief Create a SparseMatrix from tensors in CSR format.
 * @param indptr Index pointer array of the CSR.
 * @param indices Indices array of the CSR.
 * @param value Values of the sparse matrix.
 * @param shape Shape of the sparse matrix.
 *
 * @return SparseMatrix
 */
static c10::intrusive_ptr<SparseMatrix> FromCSR(
    torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
    const std::vector<int64_t>& shape);
/**
 * @brief Create a SparseMatrix from tensors in CSC format.
 * @param indptr Index pointer array of the CSC.
 * @param indices Indices array of the CSC.
 * @param value Values of the sparse matrix.
 * @param shape Shape of the sparse matrix.
 *
 * @return SparseMatrix
 */
static c10::intrusive_ptr<SparseMatrix> FromCSC(
    torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
    const std::vector<int64_t>& shape);
/**
 * @brief Create a SparseMatrix with the same sparsity pattern as an
 * existing matrix but with new values.
 * @param mat An existing sparse matrix.
 * @param value New values of the sparse matrix.
 *
 * @return SparseMatrix
 */
static c10::intrusive_ptr<SparseMatrix> ValLike(
    const c10::intrusive_ptr<SparseMatrix>& mat, torch::Tensor value);
/** @return Value of the sparse matrix. */ /** @return Value of the sparse matrix. */
inline torch::Tensor value() const { return value_; } inline torch::Tensor value() const { return value_; }
/** @return Shape of the sparse matrix. */ /** @return Shape of the sparse matrix. */
...@@ -149,56 +198,6 @@ class SparseMatrix : public torch::CustomClassHolder { ...@@ -149,56 +198,6 @@ class SparseMatrix : public torch::CustomClassHolder {
// Shape of the SparseMatrix // Shape of the SparseMatrix
const std::vector<int64_t> shape_; const std::vector<int64_t> shape_;
}; };
/**
* @brief Create a SparseMatrix from tensors in COO format.
* @param row Row indices of the COO.
* @param col Column indices of the COO.
* @param value Values of the sparse matrix.
* @param shape Shape of the sparse matrix.
*
* @return SparseMatrix
*/
c10::intrusive_ptr<SparseMatrix> CreateFromCOO(
torch::Tensor row, torch::Tensor col, torch::Tensor value,
const std::vector<int64_t>& shape);
/**
* @brief Create a SparseMatrix from tensors in CSR format.
* @param indptr Index pointer array of the CSR
* @param indices Indices array of the CSR
* @param value Values of the sparse matrix
* @param shape Shape of the sparse matrix
*
* @return SparseMatrix
*/
c10::intrusive_ptr<SparseMatrix> CreateFromCSR(
torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
const std::vector<int64_t>& shape);
/**
* @brief Create a SparseMatrix from tensors in CSC format.
* @param indptr Index pointer array of the CSC
* @param indices Indices array of the CSC
* @param value Values of the sparse matrix
* @param shape Shape of the sparse matrix
*
* @return SparseMatrix
*/
c10::intrusive_ptr<SparseMatrix> CreateFromCSC(
torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
const std::vector<int64_t>& shape);
/**
* @brief Create a SparseMatrix from a SparseMatrix using new values.
* @param mat An existing sparse matrix
* @param value New values of the sparse matrix
*
* @return SparseMatrix
*/
c10::intrusive_ptr<SparseMatrix> CreateValLike(
const c10::intrusive_ptr<SparseMatrix>& mat, torch::Tensor value);
} // namespace sparse } // namespace sparse
} // namespace dgl } // namespace dgl
......
...@@ -28,7 +28,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpAdd( ...@@ -28,7 +28,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpAdd(
auto indices = sum.indices(); auto indices = sum.indices();
auto row = indices[0]; auto row = indices[0];
auto col = indices[1]; auto col = indices[1];
return CreateFromCOO(row, col, sum.values(), A->shape()); return SparseMatrix::FromCOO(row, col, sum.values(), A->shape());
} }
} // namespace sparse } // namespace sparse
......
...@@ -184,7 +184,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpMMNoAutoGrad( ...@@ -184,7 +184,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpMMNoAutoGrad(
runtime::NDArray ret_val; runtime::NDArray ret_val;
std::tie(ret_dgl_csr, ret_val) = std::tie(ret_dgl_csr, ret_val) =
aten::CSRMM(lhs_dgl_csr, lhs_dgl_val, rhs_dgl_csr, rhs_dgl_val); aten::CSRMM(lhs_dgl_csr, lhs_dgl_val, rhs_dgl_csr, rhs_dgl_val);
return SparseMatrix::FromCSR( return SparseMatrix::FromCSRPointer(
CSRFromOldDGLCSR(ret_dgl_csr), DGLArrayToTorchTensor(ret_val), ret_shape); CSRFromOldDGLCSR(ret_dgl_csr), DGLArrayToTorchTensor(ret_val), ret_shape);
} }
......
...@@ -32,9 +32,9 @@ TORCH_LIBRARY(dgl_sparse, m) { ...@@ -32,9 +32,9 @@ TORCH_LIBRARY(dgl_sparse, m) {
.def("transpose", &SparseMatrix::Transpose) .def("transpose", &SparseMatrix::Transpose)
.def("coalesce", &SparseMatrix::Coalesce) .def("coalesce", &SparseMatrix::Coalesce)
.def("has_duplicate", &SparseMatrix::HasDuplicate); .def("has_duplicate", &SparseMatrix::HasDuplicate);
m.def("create_from_coo", &CreateFromCOO) m.def("from_coo", &SparseMatrix::FromCOO)
.def("create_from_csr", &CreateFromCSR) .def("from_csr", &SparseMatrix::FromCSR)
.def("create_from_csc", &CreateFromCSC) .def("from_csc", &SparseMatrix::FromCSC)
.def("spsp_add", &SpSpAdd) .def("spsp_add", &SpSpAdd)
.def("reduce", &Reduce) .def("reduce", &Reduce)
.def("sum", &ReduceSum) .def("sum", &ReduceSum)
...@@ -42,7 +42,7 @@ TORCH_LIBRARY(dgl_sparse, m) { ...@@ -42,7 +42,7 @@ TORCH_LIBRARY(dgl_sparse, m) {
.def("smin", &ReduceMin) .def("smin", &ReduceMin)
.def("smax", &ReduceMax) .def("smax", &ReduceMax)
.def("sprod", &ReduceProd) .def("sprod", &ReduceProd)
.def("val_like", &CreateValLike) .def("val_like", &SparseMatrix::ValLike)
.def("spmm", &SpMM) .def("spmm", &SpMM)
.def("sddmm", &SDDMM) .def("sddmm", &SDDMM)
.def("softmax", &Softmax) .def("softmax", &Softmax)
......
...@@ -122,7 +122,7 @@ c10::intrusive_ptr<SparseMatrix> SDDMM( ...@@ -122,7 +122,7 @@ c10::intrusive_ptr<SparseMatrix> SDDMM(
sparse_val = sparse_val.unsqueeze(-1); sparse_val = sparse_val.unsqueeze(-1);
} }
val = val * sparse_val; val = val * sparse_val;
return CreateValLike(sparse_mat, val); return SparseMatrix::ValLike(sparse_mat, val);
} }
} // namespace sparse } // namespace sparse
......
...@@ -32,9 +32,10 @@ torch::Tensor SoftmaxAutoGrad::forward( ...@@ -32,9 +32,10 @@ torch::Tensor SoftmaxAutoGrad::forward(
auto sparse_val_max = ReduceMax(sparse_mat, 1); auto sparse_val_max = ReduceMax(sparse_mat, 1);
auto sparse_val_exp = auto sparse_val_exp =
BroadcastSubNoAutoGrad(sparse_mat, sparse_val_max).exp(); BroadcastSubNoAutoGrad(sparse_mat, sparse_val_max).exp();
auto sparse_val_sum = ReduceSum(CreateValLike(sparse_mat, sparse_val_exp), 1); auto sparse_val_sum =
ReduceSum(SparseMatrix::ValLike(sparse_mat, sparse_val_exp), 1);
auto sparse_score = BroadcastDivNoAutoGrad( auto sparse_score = BroadcastDivNoAutoGrad(
CreateValLike(sparse_mat, sparse_val_exp), sparse_val_sum); SparseMatrix::ValLike(sparse_mat, sparse_val_exp), sparse_val_sum);
const bool sparse_requires_grad = sparse_val.requires_grad(); const bool sparse_requires_grad = sparse_val.requires_grad();
torch::Tensor cache_sparse_score; torch::Tensor cache_sparse_score;
...@@ -61,9 +62,10 @@ tensor_list SoftmaxAutoGrad::backward( ...@@ -61,9 +62,10 @@ tensor_list SoftmaxAutoGrad::backward(
torch::Tensor sparse_val_grad; torch::Tensor sparse_val_grad;
if (sparse_requires_grad) { if (sparse_requires_grad) {
auto sds = sparse_score * output_grad; auto sds = sparse_score * output_grad;
auto accum = ReduceSum(CreateValLike(sparse_mat, sds), 1); auto accum = ReduceSum(SparseMatrix::ValLike(sparse_mat, sds), 1);
sparse_val_grad = sds - BroadcastMulNoAutoGrad( sparse_val_grad =
CreateValLike(sparse_mat, sparse_score), accum); sds - BroadcastMulNoAutoGrad(
SparseMatrix::ValLike(sparse_mat, sparse_score), accum);
} }
return {torch::Tensor(), sparse_val_grad}; return {torch::Tensor(), sparse_val_grad};
...@@ -77,7 +79,7 @@ c10::intrusive_ptr<SparseMatrix> Softmax( ...@@ -77,7 +79,7 @@ c10::intrusive_ptr<SparseMatrix> Softmax(
if (sparse_val.dim() == 1) { if (sparse_val.dim() == 1) {
sparse_val = sparse_val.view({-1, 1}); sparse_val = sparse_val.view({-1, 1});
expand_dim = true; expand_dim = true;
new_sparse_mat = CreateValLike(sparse_mat, sparse_val); new_sparse_mat = SparseMatrix::ValLike(sparse_mat, sparse_val);
} }
auto new_sparse_val = SoftmaxAutoGrad::apply(new_sparse_mat, sparse_val); auto new_sparse_val = SoftmaxAutoGrad::apply(new_sparse_mat, sparse_val);
...@@ -85,7 +87,7 @@ c10::intrusive_ptr<SparseMatrix> Softmax( ...@@ -85,7 +87,7 @@ c10::intrusive_ptr<SparseMatrix> Softmax(
if (expand_dim) { if (expand_dim) {
new_sparse_val = new_sparse_val.view(-1); new_sparse_val = new_sparse_val.view(-1);
} }
return CreateValLike(sparse_mat, new_sparse_val); return SparseMatrix::ValLike(sparse_mat, new_sparse_val);
} }
} // namespace sparse } // namespace sparse
......
...@@ -55,24 +55,68 @@ SparseMatrix::SparseMatrix( ...@@ -55,24 +55,68 @@ SparseMatrix::SparseMatrix(
} }
} }
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCOO( c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCOOPointer(
const std::shared_ptr<COO>& coo, torch::Tensor value, const std::shared_ptr<COO>& coo, torch::Tensor value,
const std::vector<int64_t>& shape) { const std::vector<int64_t>& shape) {
return c10::make_intrusive<SparseMatrix>(coo, nullptr, nullptr, value, shape); return c10::make_intrusive<SparseMatrix>(coo, nullptr, nullptr, value, shape);
} }
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSR( c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSRPointer(
const std::shared_ptr<CSR>& csr, torch::Tensor value, const std::shared_ptr<CSR>& csr, torch::Tensor value,
const std::vector<int64_t>& shape) { const std::vector<int64_t>& shape) {
return c10::make_intrusive<SparseMatrix>(nullptr, csr, nullptr, value, shape); return c10::make_intrusive<SparseMatrix>(nullptr, csr, nullptr, value, shape);
} }
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSC( c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSCPointer(
const std::shared_ptr<CSR>& csc, torch::Tensor value, const std::shared_ptr<CSR>& csc, torch::Tensor value,
const std::vector<int64_t>& shape) { const std::vector<int64_t>& shape) {
return c10::make_intrusive<SparseMatrix>(nullptr, nullptr, csc, value, shape); return c10::make_intrusive<SparseMatrix>(nullptr, nullptr, csc, value, shape);
} }
// Build a SparseMatrix directly from COO index tensors. The COO descriptor
// is created with both sort flags set to false (no sortedness is claimed at
// creation time).
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCOO(
    torch::Tensor row, torch::Tensor col, torch::Tensor value,
    const std::vector<int64_t>& shape) {
  COO coo_data{shape[0], shape[1], row, col, false, false};
  auto coo_ptr = std::make_shared<COO>(coo_data);
  return SparseMatrix::FromCOOPointer(coo_ptr, value, shape);
}
// Build a SparseMatrix directly from CSR tensors. The optional value-index
// tensor is left empty; the trailing flag is initialized to false
// (presumably a sortedness flag — confirm against the CSR struct).
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSR(
    torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
    const std::vector<int64_t>& shape) {
  CSR csr_data{
      shape[0], shape[1], indptr, indices, torch::optional<torch::Tensor>(),
      false};
  auto csr_ptr = std::make_shared<CSR>(csr_data);
  return SparseMatrix::FromCSRPointer(csr_ptr, value, shape);
}
// Build a SparseMatrix directly from CSC tensors. Note the dimensions are
// passed swapped (shape[1], shape[0]): the CSC is stored using the CSR
// struct, so its "rows" are the matrix's columns.
c10::intrusive_ptr<SparseMatrix> SparseMatrix::FromCSC(
    torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
    const std::vector<int64_t>& shape) {
  CSR csc_data{
      shape[1], shape[0], indptr, indices, torch::optional<torch::Tensor>(),
      false};
  auto csc_ptr = std::make_shared<CSR>(csc_data);
  return SparseMatrix::FromCSCPointer(csc_ptr, value, shape);
}
// Create a new SparseMatrix that shares `mat`'s sparsity structure but
// carries `value` as its non-zero values. The underlying COO/CSR/CSC
// pointer is reused directly, so no index data is copied.
c10::intrusive_ptr<SparseMatrix> SparseMatrix::ValLike(
    const c10::intrusive_ptr<SparseMatrix>& mat, torch::Tensor value) {
  // The new values must cover the same number of non-zeros and live on the
  // same device as the old ones.
  TORCH_CHECK(
      mat->value().size(0) == value.size(0), "The first dimension of ",
      "the old values and the new values must be the same.");
  TORCH_CHECK(
      mat->value().device() == value.device(), "The device of the ",
      "old values and the new values must be the same.");
  auto shape = mat->shape();
  // Reuse whichever sparse format `mat` has already materialized, checked
  // in COO -> CSR -> CSC order.
  if (mat->HasCOO()) {
    return SparseMatrix::FromCOOPointer(mat->COOPtr(), value, shape);
  } else if (mat->HasCSR()) {
    return SparseMatrix::FromCSRPointer(mat->CSRPtr(), value, shape);
  } else {
    return SparseMatrix::FromCSCPointer(mat->CSCPtr(), value, shape);
  }
}
std::shared_ptr<COO> SparseMatrix::COOPtr() { std::shared_ptr<COO> SparseMatrix::COOPtr() {
if (coo_ == nullptr) { if (coo_ == nullptr) {
_CreateCOO(); _CreateCOO();
...@@ -119,11 +163,11 @@ c10::intrusive_ptr<SparseMatrix> SparseMatrix::Transpose() const { ...@@ -119,11 +163,11 @@ c10::intrusive_ptr<SparseMatrix> SparseMatrix::Transpose() const {
auto value = value_; auto value = value_;
if (HasCOO()) { if (HasCOO()) {
auto coo = COOTranspose(coo_); auto coo = COOTranspose(coo_);
return SparseMatrix::FromCOO(coo, value, shape); return SparseMatrix::FromCOOPointer(coo, value, shape);
} else if (HasCSR()) { } else if (HasCSR()) {
return SparseMatrix::FromCSC(csr_, value, shape); return SparseMatrix::FromCSCPointer(csr_, value, shape);
} else { } else {
return SparseMatrix::FromCSR(csc_, value, shape); return SparseMatrix::FromCSRPointer(csc_, value, shape);
} }
} }
...@@ -160,49 +204,5 @@ void SparseMatrix::_CreateCSC() { ...@@ -160,49 +204,5 @@ void SparseMatrix::_CreateCSC() {
} }
} }
c10::intrusive_ptr<SparseMatrix> CreateFromCOO(
torch::Tensor row, torch::Tensor col, torch::Tensor value,
const std::vector<int64_t>& shape) {
auto coo =
std::make_shared<COO>(COO{shape[0], shape[1], row, col, false, false});
return SparseMatrix::FromCOO(coo, value, shape);
}
c10::intrusive_ptr<SparseMatrix> CreateFromCSR(
torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
const std::vector<int64_t>& shape) {
auto csr = std::make_shared<CSR>(
CSR{shape[0], shape[1], indptr, indices, torch::optional<torch::Tensor>(),
false});
return SparseMatrix::FromCSR(csr, value, shape);
}
c10::intrusive_ptr<SparseMatrix> CreateFromCSC(
torch::Tensor indptr, torch::Tensor indices, torch::Tensor value,
const std::vector<int64_t>& shape) {
auto csc = std::make_shared<CSR>(
CSR{shape[1], shape[0], indptr, indices, torch::optional<torch::Tensor>(),
false});
return SparseMatrix::FromCSC(csc, value, shape);
}
c10::intrusive_ptr<SparseMatrix> CreateValLike(
const c10::intrusive_ptr<SparseMatrix>& mat, torch::Tensor value) {
TORCH_CHECK(
mat->value().size(0) == value.size(0), "The first dimension of ",
"the old values and the new values must be the same.");
TORCH_CHECK(
mat->value().device() == value.device(), "The device of the ",
"old values and the new values must be the same.");
auto shape = mat->shape();
if (mat->HasCOO()) {
return SparseMatrix::FromCOO(mat->COOPtr(), value, shape);
} else if (mat->HasCSR()) {
return SparseMatrix::FromCSR(mat->CSRPtr(), value, shape);
} else {
return SparseMatrix::FromCSC(mat->CSCPtr(), value, shape);
}
}
} // namespace sparse } // namespace sparse
} // namespace dgl } // namespace dgl
...@@ -20,7 +20,7 @@ c10::intrusive_ptr<SparseMatrix> SparseMatrix::Coalesce() { ...@@ -20,7 +20,7 @@ c10::intrusive_ptr<SparseMatrix> SparseMatrix::Coalesce() {
torch::Tensor indices = coalesced_coo.indices(); torch::Tensor indices = coalesced_coo.indices();
torch::Tensor row = indices[0]; torch::Tensor row = indices[0];
torch::Tensor col = indices[1]; torch::Tensor col = indices[1];
return CreateFromCOO(row, col, coalesced_coo.values(), this->shape()); return SparseMatrix::FromCOO(row, col, coalesced_coo.values(), this->shape());
} }
bool SparseMatrix::HasDuplicate() { bool SparseMatrix::HasDuplicate() {
......
...@@ -135,7 +135,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpMM( ...@@ -135,7 +135,7 @@ c10::intrusive_ptr<SparseMatrix> SpSpMM(
auto indptr = results[0]; auto indptr = results[0];
auto indices = results[1]; auto indices = results[1];
auto value = results[2]; auto value = results[2];
return CreateFromCSR(indptr, indices, value, ret_shape); return SparseMatrix::FromCSR(indptr, indices, value, ret_shape);
} }
} // namespace sparse } // namespace sparse
......
...@@ -18,9 +18,9 @@ Sparse matrix class ...@@ -18,9 +18,9 @@ Sparse matrix class
There are a few ways to create a sparse matrix: There are a few ways to create a sparse matrix:
* In COO format using row and col indices, use :func:`create_from_coo`. * In COO format using row and col indices, use :func:`from_coo`.
* In CSR format using row pointers and col indices, use :func:`create_from_csr`. * In CSR format using row pointers and col indices, use :func:`from_csr`.
* In CSC format using col pointers and row indices, use :func:`create_from_csc`. * In CSC format using col pointers and row indices, use :func:`from_csc`.
For example, one can create COO matrices as follows: For example, one can create COO matrices as follows:
...@@ -28,7 +28,7 @@ Sparse matrix class ...@@ -28,7 +28,7 @@ Sparse matrix class
>>> row = torch.tensor([1, 1, 2]) >>> row = torch.tensor([1, 1, 2])
>>> col = torch.tensor([2, 4, 3]) >>> col = torch.tensor([2, 4, 3])
>>> A = create_from_coo(row, col) >>> A = from_coo(row, col)
>>> A >>> A
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
...@@ -39,7 +39,7 @@ Sparse matrix class ...@@ -39,7 +39,7 @@ Sparse matrix class
>>> # vector values >>> # vector values
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3]]) >>> val = torch.tensor([[1, 1], [2, 2], [3, 3]])
>>> A = create_from_coo(row, col, val) >>> A = from_coo(row, col, val)
SparseMatrix(indices=tensor([[1, 1, 2], SparseMatrix(indices=tensor([[1, 1, 2],
[2, 4, 3]]), [2, 4, 3]]),
values=tensor([[1, 1], values=tensor([[1, 1],
...@@ -52,7 +52,7 @@ Sparse matrix class ...@@ -52,7 +52,7 @@ Sparse matrix class
>>> indptr = torch.tensor([0, 1, 2, 5]) >>> indptr = torch.tensor([0, 1, 2, 5])
>>> indices = torch.tensor([1, 2, 0, 1, 2]) >>> indices = torch.tensor([1, 2, 0, 1, 2])
>>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) >>> val = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> A = create_from_csr(indptr, indices, val) >>> A = from_csr(indptr, indices, val)
>>> A >>> A
SparseMatrix(indices=tensor([[0, 1, 2, 2, 2], SparseMatrix(indices=tensor([[0, 1, 2, 2, 2],
[1, 2, 0, 1, 2]]), [1, 2, 0, 1, 2]]),
...@@ -69,9 +69,9 @@ Creators ...@@ -69,9 +69,9 @@ Creators
.. autosummary:: .. autosummary::
:toctree: ../../generated/ :toctree: ../../generated/
create_from_coo from_coo
create_from_csr from_csr
create_from_csc from_csc
val_like val_like
Attributes and methods Attributes and methods
......
...@@ -96,7 +96,7 @@ if __name__ == "__main__": ...@@ -96,7 +96,7 @@ if __name__ == "__main__":
# Create the sparse adjacency matrix A. # Create the sparse adjacency matrix A.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix. # Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev) I = dglsp.identity(A.shape, device=dev)
......
...@@ -101,7 +101,7 @@ if __name__ == "__main__": ...@@ -101,7 +101,7 @@ if __name__ == "__main__":
# Create the sparse adjacency matrix A. # Create the sparse adjacency matrix A.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix. # Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev) I = dglsp.identity(A.shape, device=dev)
......
...@@ -125,7 +125,7 @@ if __name__ == "__main__": ...@@ -125,7 +125,7 @@ if __name__ == "__main__":
# Create the sparse adjacency matrix A. # Create the sparse adjacency matrix A.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Add self-loops. # Add self-loops.
I = dglsp.identity(A.shape, device=dev) I = dglsp.identity(A.shape, device=dev)
......
...@@ -87,7 +87,7 @@ if __name__ == "__main__": ...@@ -87,7 +87,7 @@ if __name__ == "__main__":
# Create the adjacency matrix of graph. # Create the adjacency matrix of graph.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
############################################################################ ############################################################################
# (HIGHLIGHT) Compute the symmetrically normalized adjacency matrix with # (HIGHLIGHT) Compute the symmetrically normalized adjacency matrix with
......
...@@ -139,7 +139,7 @@ if __name__ == "__main__": ...@@ -139,7 +139,7 @@ if __name__ == "__main__":
# Create the adjacency matrix of graph. # Create the adjacency matrix of graph.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
############################################################################ ############################################################################
# (HIGHLIGHT) Compute the symmetrically normalized adjacency matrix with # (HIGHLIGHT) Compute the symmetrically normalized adjacency matrix with
......
...@@ -72,7 +72,7 @@ def load_data(): ...@@ -72,7 +72,7 @@ def load_data():
# We follow the paper and assume that the rows of the incidence matrix # We follow the paper and assume that the rows of the incidence matrix
# are for nodes and the columns are for edges. # are for nodes and the columns are for edges.
src, dst = graph.edges() src, dst = graph.edges()
H = dglsp.create_from_coo(dst, src) H = dglsp.from_coo(dst, src)
H = H + dglsp.identity(H.shape) H = H + dglsp.identity(H.shape)
X = graph.ndata["feat"] X = graph.ndata["feat"]
......
...@@ -101,7 +101,7 @@ def load_data(): ...@@ -101,7 +101,7 @@ def load_data():
# We follow the paper and assume that the rows of the incidence matrix # We follow the paper and assume that the rows of the incidence matrix
# are for nodes and the columns are for edges. # are for nodes and the columns are for edges.
src, dst = graph.edges() src, dst = graph.edges()
H = dglsp.create_from_coo(dst, src) H = dglsp.from_coo(dst, src)
H = H + dglsp.identity(H.shape) H = H + dglsp.identity(H.shape)
X = graph.ndata["feat"] X = graph.ndata["feat"]
......
...@@ -71,7 +71,7 @@ if __name__ == "__main__": ...@@ -71,7 +71,7 @@ if __name__ == "__main__":
# Create the sparse adjacency matrix A # Create the sparse adjacency matrix A
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix. # Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev) I = dglsp.identity(A.shape, device=dev)
......
...@@ -104,7 +104,7 @@ if __name__ == "__main__": ...@@ -104,7 +104,7 @@ if __name__ == "__main__":
# for adjacency matrix in the original paper). # for adjacency matrix in the original paper).
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix. # Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev) I = dglsp.identity(A.shape, device=dev)
......
...@@ -186,7 +186,7 @@ if __name__ == "__main__": ...@@ -186,7 +186,7 @@ if __name__ == "__main__":
# Create the sparse adjacency matrix A. # Create the sparse adjacency matrix A.
src, dst = g.edges() src, dst = g.edges()
N = g.num_nodes() N = g.num_nodes()
A = dglsp.create_from_coo(dst, src, shape=(N, N)) A = dglsp.from_coo(dst, src, shape=(N, N))
# Create the TWIRLS model. # Create the TWIRLS model.
in_size = X.shape[1] in_size = X.shape[1]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment