Unverified Commit 0bdc437b authored by Minjie Wang, committed by GitHub

[Doc] Update colab tutorials with spmatrix (#5220)

* Created using Colaboratory

* Created using Colaboratory

* Created using Colaboratory

* Created using Colaboratory

* Created using Colaboratory

* Created using Colaboratory
parent fbbe6d61
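
The change applied throughout these notebooks replaces the row/column form of `dglsp.from_coo` with the single indices tensor accepted by `dglsp.spmatrix`. A minimal sketch of the two constructions used in the tutorials (shapes and values are illustrative):

```python
import dgl.sparse as dglsp
import torch

# Old pattern: separate row and column index tensors.
row = torch.tensor([1, 1, 2])
col = torch.tensor([0, 2, 0])
A_old = dglsp.from_coo(row, col)

# New pattern: a single (2, nnz) indices tensor.
indices = torch.stack([row, col])
A_new = dglsp.spmatrix(indices)

print(A_old.to_dense())
print(A_new.to_dense())
```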
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
"id": "FTqB360eRvya", "id": "FTqB360eRvya",
"outputId": "df54b94e-fd1b-4b96-fca1-21948284254c" "outputId": "df54b94e-fd1b-4b96-fca1-21948284254c"
}, },
"execution_count": 2, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -71,7 +71,9 @@ ...@@ -71,7 +71,9 @@
"## Graph Convolutional Layer\n", "## Graph Convolutional Layer\n",
"\n", "\n",
"Mathematically, the graph convolutional layer is defined as:\n", "Mathematically, the graph convolutional layer is defined as:\n",
"$$f(X^{(l)}, A) = \\sigma(\\hat{D}^{-\\frac{1}{2}}\\hat{A}\\hat{D}^{-\\frac{1}{2}}X^{(l)}W^{(l)})$$", "\n",
"$$f(X^{(l)}, A) = \\sigma(\\hat{D}^{-\\frac{1}{2}}\\hat{A}\\hat{D}^{-\\frac{1}{2}}X^{(l)}W^{(l)})$$\n",
"\n",
"with $\\hat{A} = A + I$, where $A$ denotes the adjacency matrix and $I$ denotes the identity matrix, $\\hat{D}$ refers to the diagonal node degree matrix of $\\hat{A}$ and $W^{(l)}$ denotes a trainable weight matrix. $\\sigma$ refers to a non-linear activation (e.g. relu).\n", "with $\\hat{A} = A + I$, where $A$ denotes the adjacency matrix and $I$ denotes the identity matrix, $\\hat{D}$ refers to the diagonal node degree matrix of $\\hat{A}$ and $W^{(l)}$ denotes a trainable weight matrix. $\\sigma$ refers to a non-linear activation (e.g. relu).\n",
"\n", "\n",
"The code below shows how to implement it using the `dgl.sparse` package. The core operations are:\n", "The code below shows how to implement it using the `dgl.sparse` package. The core operations are:\n",
...@@ -114,7 +116,7 @@ ...@@ -114,7 +116,7 @@
"metadata": { "metadata": {
"id": "Y4I4EhHQ_kKb" "id": "Y4I4EhHQ_kKb"
}, },
"execution_count": 3, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -144,7 +146,7 @@ ...@@ -144,7 +146,7 @@
"metadata": { "metadata": {
"id": "BHX3vRjDWJTO" "id": "BHX3vRjDWJTO"
}, },
"execution_count": 4, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -179,9 +181,9 @@ ...@@ -179,9 +181,9 @@
" loss_fcn = nn.CrossEntropyLoss()\n", " loss_fcn = nn.CrossEntropyLoss()\n",
"\n", "\n",
" # Preprocess to get the adjacency matrix of the graph.\n", " # Preprocess to get the adjacency matrix of the graph.\n",
" src, dst = g.edges()\n", " indices = torch.stack(g.edges())\n",
" N = g.num_nodes()\n", " N = g.num_nodes()\n",
" A = dglsp.from_coo(dst, src, shape=(N, N))\n", " A = dglsp.spmatrix(indices, shape=(N, N))\n",
"\n", "\n",
" for epoch in range(100):\n", " for epoch in range(100):\n",
" model.train()\n", " model.train()\n",
...@@ -229,7 +231,7 @@ ...@@ -229,7 +231,7 @@
}, },
"outputId": "552e2c22-44f4-4495-c7f9-a57f13484270" "outputId": "552e2c22-44f4-4495-c7f9-a57f13484270"
}, },
"execution_count": 5, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -280,4 +282,4 @@ ...@@ -280,4 +282,4 @@
} }
} }
] ]
} }
\ No newline at end of file
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": null,
"metadata": { "metadata": {
"id": "F6eQWmWn7lqh", "id": "F6eQWmWn7lqh",
"colab": { "colab": {
...@@ -101,9 +101,9 @@ ...@@ -101,9 +101,9 @@
"dgl_g = dataset[0]\n", "dgl_g = dataset[0]\n",
"\n", "\n",
"# Get its adjacency matrix.\n", "# Get its adjacency matrix.\n",
"src, dst = dgl_g.edges()\n", "indices = torch.stack(dgl_g.edges())\n",
"N = dgl_g.num_nodes()\n", "N = dgl_g.num_nodes()\n",
"A = dglsp.from_coo(dst, src, shape=(N, N))\n", "A = dglsp.spmatrix(indices, shape=(N, N))\n",
"print(A.to_dense())" "print(A.to_dense())"
], ],
"metadata": { "metadata": {
...@@ -113,7 +113,7 @@ ...@@ -113,7 +113,7 @@
"id": "_TnCECJmBKJE", "id": "_TnCECJmBKJE",
"outputId": "d8b78f0b-3a1c-4a9e-bcc9-ed4df7b7b5b7" "outputId": "d8b78f0b-3a1c-4a9e-bcc9-ed4df7b7b5b7"
}, },
"execution_count": 2, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -134,7 +134,9 @@ ...@@ -134,7 +134,9 @@
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"We use the graph convolution matrix from Graph Convolution Networks as the diffusion matrix in this example. The graph convolution matrix is defined as:\n", "We use the graph convolution matrix from Graph Convolution Networks as the diffusion matrix in this example. The graph convolution matrix is defined as:\n",
"$$\\tilde{A} = \\hat{D}^{-\\frac{1}{2}}\\hat{A}\\hat{D}^{-\\frac{1}{2}}$$", "\n",
"$$\\tilde{A} = \\hat{D}^{-\\frac{1}{2}}\\hat{A}\\hat{D}^{-\\frac{1}{2}}$$\n",
"\n",
"with $\\hat{A} = A + I$, where $A$ denotes the adjacency matrix and $I$ denotes the identity matrix, $\\hat{D}$ refers to the diagonal node degree matrix of $\\hat{A}$." "with $\\hat{A} = A + I$, where $A$ denotes the adjacency matrix and $I$ denotes the identity matrix, $\\hat{D}$ refers to the diagonal node degree matrix of $\\hat{A}$."
], ],
"metadata": { "metadata": {
...@@ -159,7 +161,7 @@ ...@@ -159,7 +161,7 @@
"id": "JyzctBGaC_O5", "id": "JyzctBGaC_O5",
"outputId": "b03ef3dc-dcf5-494e-9191-30591d09f138" "outputId": "b03ef3dc-dcf5-494e-9191-30591d09f138"
}, },
"execution_count": 3, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -204,7 +206,7 @@ ...@@ -204,7 +206,7 @@
"metadata": { "metadata": {
"id": "DXb0uKqXDZKb" "id": "DXb0uKqXDZKb"
}, },
"execution_count": 4, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -249,7 +251,7 @@ ...@@ -249,7 +251,7 @@
}, },
"outputId": "be93263e-2283-4db7-caff-2e15e75ceb02" "outputId": "be93263e-2283-4db7-caff-2e15e75ceb02"
}, },
"execution_count": 5, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "execute_result", "output_type": "execute_result",
...@@ -5601,7 +5603,7 @@ ...@@ -5601,7 +5603,7 @@
"metadata": { "metadata": {
"id": "__U3Hsp_S0SR" "id": "__U3Hsp_S0SR"
}, },
"execution_count": 6, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -5678,9 +5680,9 @@ ...@@ -5678,9 +5680,9 @@
"\n", "\n",
"# Create the sparse adjacency matrix A (note that W was used as the notation\n", "# Create the sparse adjacency matrix A (note that W was used as the notation\n",
"# for adjacency matrix in the original paper).\n", "# for adjacency matrix in the original paper).\n",
"src, dst = g.edges()\n", "indices = torch.stack(g.edges())\n",
"N = g.num_nodes()\n", "N = g.num_nodes()\n",
"A = dglsp.from_coo(dst, src, shape=(N, N))\n", "A = dglsp.spmatrix(indices, shape=(N, N))\n",
"\n", "\n",
"# Calculate the graph convolution matrix.\n", "# Calculate the graph convolution matrix.\n",
"I = dglsp.identity(A.shape, device=dev)\n", "I = dglsp.identity(A.shape, device=dev)\n",
...@@ -5708,7 +5710,7 @@ ...@@ -5708,7 +5710,7 @@
}, },
"outputId": "19e86f6a-c7f1-4b40-8cfc-58a181fc30d7" "outputId": "19e86f6a-c7f1-4b40-8cfc-58a181fc30d7"
}, },
"execution_count": 7, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -5754,4 +5756,4 @@ ...@@ -5754,4 +5756,4 @@
} }
} }
] ]
} }
\ No newline at end of file
...@@ -227,9 +227,9 @@ ...@@ -227,9 +227,9 @@
" )\n", " )\n",
"\n", "\n",
" def forward(self, g, X, pos_enc):\n", " def forward(self, g, X, pos_enc):\n",
" src, dst = g.edges()\n", " indices = torch.stack(g.edges())\n",
" N = g.num_nodes()\n", " N = g.num_nodes()\n",
" A = dglsp.from_coo(dst, src, shape=(N, N))\n", " A = dglsp.spmatrix(indices, shape=(N, N))\n",
" h = self.atom_encoder(X) + self.pos_linear(pos_enc)\n", " h = self.atom_encoder(X) + self.pos_linear(pos_enc)\n",
" for layer in self.layers:\n", " for layer in self.layers:\n",
" h = layer(A, h)\n", " h = layer(A, h)\n",
...@@ -418,4 +418,4 @@ ...@@ -418,4 +418,4 @@
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 0 "nbformat_minor": 0
} }
\ No newline at end of file
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
"id": "__2tKqL0eaB0", "id": "__2tKqL0eaB0",
"outputId": "5b5106f6-074b-42a5-fc4c-4936efd2cef8" "outputId": "5b5106f6-074b-42a5-fc4c-4936efd2cef8"
}, },
"execution_count": 1, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -108,9 +108,9 @@ ...@@ -108,9 +108,9 @@
"import dgl.sparse as dglsp\n", "import dgl.sparse as dglsp\n",
"import torch\n", "import torch\n",
"\n", "\n",
"H = dglsp.from_coo(\n", "H = dglsp.spmatrix(\n",
" torch.LongTensor([0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10]),\n", " torch.LongTensor([[0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10],\n",
" torch.LongTensor([0, 0, 0, 1, 3, 4, 2, 1, 0, 2, 3, 4, 2, 1, 3, 1, 3, 2, 4, 4])\n", " [0, 0, 0, 1, 3, 4, 2, 1, 0, 2, 3, 4, 2, 1, 3, 1, 3, 2, 4, 4]])\n",
")\n", ")\n",
"\n", "\n",
"print(H.to_dense())" "print(H.to_dense())"
...@@ -122,7 +122,7 @@ ...@@ -122,7 +122,7 @@
}, },
"outputId": "a1a576f6-1559-479c-9f3e-93e41a56833d" "outputId": "a1a576f6-1559-479c-9f3e-93e41a56833d"
}, },
"execution_count": 2, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -168,7 +168,7 @@ ...@@ -168,7 +168,7 @@
}, },
"outputId": "ffe2c441-8c2c-48a7-cef2-4ef6e96548ec" "outputId": "ffe2c441-8c2c-48a7-cef2-4ef6e96548ec"
}, },
"execution_count": 3, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -187,8 +187,9 @@ ...@@ -187,8 +187,9 @@
"## Hypergraph Neural Network (HGNN) Layer\n", "## Hypergraph Neural Network (HGNN) Layer\n",
"\n", "\n",
"The [HGNN layer](https://arxiv.org/pdf/1809.09401.pdf) is defined as:\n", "The [HGNN layer](https://arxiv.org/pdf/1809.09401.pdf) is defined as:\n",
"$$f(X^{(l)}, H; W^{(l)}) = \\sigma(L X^{(l)} W^{(l)})$$", "\n",
"$$L = D_v^{-1/2} H B D_e^{-1} H^\\top D_v^{-1/2}$$", "$$f(X^{(l)}, H; W^{(l)}) = \\sigma(L X^{(l)} W^{(l)})$$$$L = D_v^{-1/2} H B D_e^{-1} H^\\top D_v^{-1/2}$$\n",
"\n",
"where\n", "where\n",
"* $H \\in \\mathbb{R}^{N \\times M}$ is the incidence matrix of hypergraph with $N$ nodes and $M$ hyperedges.\n", "* $H \\in \\mathbb{R}^{N \\times M}$ is the incidence matrix of hypergraph with $N$ nodes and $M$ hyperedges.\n",
"* $D_v \\in \\mathbb{R}^{N \\times N}$ is a diagonal matrix representing node degrees, whose $i$-th diagonal element is $\\sum_{j=1}^M H_{ij}$.\n", "* $D_v \\in \\mathbb{R}^{N \\times N}$ is a diagonal matrix representing node degrees, whose $i$-th diagonal element is $\\sum_{j=1}^M H_{ij}$.\n",
...@@ -247,7 +248,7 @@ ...@@ -247,7 +248,7 @@
"metadata": { "metadata": {
"id": "58WnPtPvT2mx" "id": "58WnPtPvT2mx"
}, },
"execution_count": 4, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -272,8 +273,8 @@ ...@@ -272,8 +273,8 @@
" dataset = CoraGraphDataset()\n", " dataset = CoraGraphDataset()\n",
"\n", "\n",
" graph = dataset[0]\n", " graph = dataset[0]\n",
" src, dst = graph.edges()\n", " indices = torch.stack(graph.edges())\n",
" H = dglsp.from_coo(dst, src)\n", " H = dglsp.spmatrix(indices)\n",
" H = H + dglsp.identity(H.shape)\n", " H = H + dglsp.identity(H.shape)\n",
"\n", "\n",
" X = graph.ndata[\"feat\"]\n", " X = graph.ndata[\"feat\"]\n",
...@@ -286,7 +287,7 @@ ...@@ -286,7 +287,7 @@
"metadata": { "metadata": {
"id": "qI0j1J9pwTFg" "id": "qI0j1J9pwTFg"
}, },
"execution_count": 5, "execution_count": null,
"outputs": [] "outputs": []
}, },
{ {
...@@ -354,7 +355,7 @@ ...@@ -354,7 +355,7 @@
"id": "IfEc6JRXwHPt", "id": "IfEc6JRXwHPt",
"outputId": "0172578a-6a1b-49eb-adcb-77ee1a949186" "outputId": "0172578a-6a1b-49eb-adcb-77ee1a949186"
}, },
"execution_count": 6, "execution_count": null,
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
...@@ -406,4 +407,4 @@ ...@@ -406,4 +407,4 @@
} }
} }
] ]
} }
\ No newline at end of file
...@@ -75,17 +75,9 @@ ...@@ -75,17 +75,9 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"### Initializing a DGL Sparse Matrix\n", "### Creating a DGL Sparse Matrix\n",
"\n", "\n",
"A DGL Sparse Matrix can be initialized in various ways.\n", "The simplest way to create a sparse matrix is using the `spmatrix` API by providing the indices of the non-zero elements. The indices are stored in a tensor of shape `(2, nnz)`, where the `i`-th non-zero element is stored at position `(indices[0][i], indices[1][i])`. The code below creates a 3x3 sparse matrix.\n"
"\n",
"* `from_coo()`\n",
"* `from_csr()`\n",
"* `from_csc()`\n",
"\n",
"Take a look at the following examples:\n",
"\n",
"From [COO format](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO))"
], ],
"metadata": { "metadata": {
"id": "_q4HYodcWenB" "id": "_q4HYodcWenB"
...@@ -102,9 +94,10 @@ ...@@ -102,9 +94,10 @@
"import torch\n", "import torch\n",
"import dgl.sparse as dglsp\n", "import dgl.sparse as dglsp\n",
"\n", "\n",
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"A = dglsp.from_coo(row, col) # 1.0 is default value for nnz elements.\n", "A = dglsp.spmatrix(i) # 1.0 is default value for nnz elements.\n",
"\n",
"print(A)\n", "print(A)\n",
"print(\"\")\n", "print(\"\")\n",
"print(\"In dense format:\")\n", "print(\"In dense format:\")\n",
...@@ -114,7 +107,7 @@ ...@@ -114,7 +107,7 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"*Compare implicit shape vs explicit shape*" "If not specified, the shape is inferred automatically from the indices but you can specify it explicitly too."
], ],
"metadata": { "metadata": {
"id": "W1JJg-eZ7K3t" "id": "W1JJg-eZ7K3t"
...@@ -123,15 +116,15 @@ ...@@ -123,15 +116,15 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 0, 1])\n", "i = torch.tensor([[0, 0, 1],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"\n", "\n",
"A1 = dglsp.from_coo(row, col)\n", "A1 = dglsp.spmatrix(i)\n",
"print(f\"Implicit Shape: {A1.shape}\")\n", "print(f\"Implicit Shape: {A1.shape}\")\n",
"print(A1.to_dense())\n", "print(A1.to_dense())\n",
"print(\"\")\n", "print(\"\")\n",
"\n", "\n",
"A2 = dglsp.from_coo(row, col, shape=(3, 3))\n", "A2 = dglsp.spmatrix(i, shape=(3, 3))\n",
"print(f\"Explicit Shape: {A2.shape}\")\n", "print(f\"Explicit Shape: {A2.shape}\")\n",
"print(A2.to_dense())" "print(A2.to_dense())"
], ],
...@@ -153,15 +146,15 @@ ...@@ -153,15 +146,15 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"# The length of the value should match the nnz elements represented by the\n", "# The length of the value should match the nnz elements represented by the\n",
"# sparse matrix format.\n", "# sparse matrix format.\n",
"scalar_val = torch.tensor([1., 2., 3.])\n", "scalar_val = torch.tensor([1., 2., 3.])\n",
"vector_val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n", "vector_val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n",
"\n", "\n",
"print(\"-----Scalar Values-----\")\n", "print(\"-----Scalar Values-----\")\n",
"A = dglsp.from_coo(row, col, scalar_val)\n", "A = dglsp.spmatrix(i, scalar_val)\n",
"print(A)\n", "print(A)\n",
"print(\"\")\n", "print(\"\")\n",
"print(\"In dense format:\")\n", "print(\"In dense format:\")\n",
...@@ -169,7 +162,7 @@ ...@@ -169,7 +162,7 @@
"print(\"\")\n", "print(\"\")\n",
"\n", "\n",
"print(\"-----Vector Values-----\")\n", "print(\"-----Vector Values-----\")\n",
"A = dglsp.from_coo(row, col, vector_val)\n", "A = dglsp.spmatrix(i, vector_val)\n",
"print(A)\n", "print(A)\n",
"print(\"\")\n", "print(\"\")\n",
"print(\"In dense format:\")\n", "print(\"In dense format:\")\n",
...@@ -193,10 +186,10 @@ ...@@ -193,10 +186,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 0, 0, 1])\n", "i = torch.tensor([[0, 0, 0, 1],\n",
"col = torch.tensor([0, 2, 2, 0])\n", " [0, 2, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4])\n", "val = torch.tensor([1., 2., 3., 4])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"print(A)\n", "print(A)\n",
"print(f\"Whether A contains duplicate indices: {A.has_duplicate()}\")\n", "print(f\"Whether A contains duplicate indices: {A.has_duplicate()}\")\n",
"print(\"\")\n", "print(\"\")\n",
...@@ -214,35 +207,28 @@ ...@@ -214,35 +207,28 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"From [CSR format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format)) and [CSC format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))" "**val_like**\n",
"\n",
"You can create a new sparse matrix by retaining the non-zero indices of a given sparse matrix but with different non-zero values."
], ],
"metadata": { "metadata": {
"id": "XfnW7kGnd1lF" "id": "ZJ09qM5NaxuI"
} }
}, },
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"indptr = torch.tensor([0, 1, 2, 5])\n", "i = torch.tensor([[1, 1, 2],\n",
"indices = torch.tensor([1, 2, 0, 1, 2])\n", " [0, 2, 0]])\n",
"\n", "val = torch.tensor([1., 2., 3.])\n",
"print(\"-----Create from CSR format-----\")\n", "A = dglsp.spmatrix(i, val)\n",
"A = dglsp.from_csr(indptr, indices)\n",
"print(A)\n",
"print(\"\")\n",
"print(\"In dense format:\")\n",
"print(A.to_dense())\n",
"print(\"\")\n",
"\n", "\n",
"print(\"-----Create from CSC format-----\")\n", "new_val = torch.tensor([4., 5., 6.])\n",
"B = dglsp.from_csc(indptr, indices)\n", "B = dglsp.val_like(A, new_val)\n",
"print(B)\n", "print(B)"
"print(\"\")\n",
"print(\"In dense format:\")\n",
"print(B.to_dense())"
], ],
"metadata": { "metadata": {
"id": "3puXyMFsvdlj" "id": "UB3lKJVBbsUD"
}, },
"execution_count": null, "execution_count": null,
"outputs": [] "outputs": []
...@@ -250,28 +236,50 @@ ...@@ -250,28 +236,50 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"**val_like**\n", "**Create a sparse matrix from various sparse formats**\n",
"\n", "\n",
"Similar to pytorch, we can create a Sparse Matrix with new values, the same nonzero indices as the given sparse matrix." "* `from_coo()`: Create a sparse matrix from [COO](https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)) format.\n",
"* `from_csr()`: Create a sparse matrix from [CSR](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format)) format.\n",
"* `from_csc()`: Create a sparse matrix from [CSC](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS)) format."
], ],
"metadata": { "metadata": {
"id": "ZJ09qM5NaxuI" "id": "nWjBSFDBXDPJ"
} }
}, },
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "row = torch.tensor([0, 1, 2, 2, 2])\n",
"col = torch.tensor([0, 2, 0])\n", "col = torch.tensor([1, 2, 0, 1, 2])\n",
"val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val)\n",
"\n", "\n",
"new_val = torch.tensor([4., 5., 6.])\n", "print(\"-----Create from COO format-----\")\n",
"B = dglsp.val_like(A, new_val)\n", "A = dglsp.from_coo(row, col)\n",
"print(B)" "print(A)\n",
"print(\"\")\n",
"print(\"In dense format:\")\n",
"print(A.to_dense())\n",
"print(\"\")\n",
"\n",
"indptr = torch.tensor([0, 1, 2, 5])\n",
"indices = torch.tensor([1, 2, 0, 1, 2])\n",
"\n",
"print(\"-----Create from CSR format-----\")\n",
"A = dglsp.from_csr(indptr, indices)\n",
"print(A)\n",
"print(\"\")\n",
"print(\"In dense format:\")\n",
"print(A.to_dense())\n",
"print(\"\")\n",
"\n",
"print(\"-----Create from CSC format-----\")\n",
"B = dglsp.from_csc(indptr, indices)\n",
"print(B)\n",
"print(\"\")\n",
"print(\"In dense format:\")\n",
"print(B.to_dense())"
], ],
"metadata": { "metadata": {
"id": "UB3lKJVBbsUD" "id": "3puXyMFsvdlj"
}, },
"execution_count": null, "execution_count": null,
"outputs": [] "outputs": []
...@@ -288,10 +296,10 @@ ...@@ -288,10 +296,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4.])\n", "val = torch.tensor([1., 2., 3., 4.])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"\n", "\n",
"print(f\"Shape of sparse matrix: {A.shape}\")\n", "print(f\"Shape of sparse matrix: {A.shape}\")\n",
"print(f\"The number of nonzero elements of sparse matrix: {A.nnz}\")\n", "print(f\"The number of nonzero elements of sparse matrix: {A.nnz}\")\n",
...@@ -322,10 +330,10 @@ ...@@ -322,10 +330,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4.])\n", "val = torch.tensor([1., 2., 3., 4.])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"\n", "\n",
"B = A.to(device='cpu', dtype=torch.int32)\n", "B = A.to(device='cpu', dtype=torch.int32)\n",
"print(f\"Device sparse matrix is stored on: {B.device}\")\n", "print(f\"Device sparse matrix is stored on: {B.device}\")\n",
...@@ -465,9 +473,9 @@ ...@@ -465,9 +473,9 @@
"\n", "\n",
"A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n", "A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n",
"----------------|---------------|----------------|----------\n", "----------------|---------------|----------------|----------\n",
"**DiagMatrix** | | |🚫\n", "**DiagMatrix** |✔️ |✔️ |\n",
"**SparseMatrix**| | |🚫\n", "**SparseMatrix**|✔️ |✔️ |\n",
"**scalar** |🚫 |🚫 |🚫" "**scalar** | | |"
], ],
"metadata": { "metadata": {
"id": "39YJitpW-K9v" "id": "39YJitpW-K9v"
...@@ -476,17 +484,17 @@ ...@@ -476,17 +484,17 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A1 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A1 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A1:\")\n", "print(\"A1:\")\n",
"print(A1.to_dense())\n", "print(A1.to_dense())\n",
"\n", "\n",
"row = torch.tensor([0, 1, 2])\n", "i = torch.tensor([[0, 1, 2],\n",
"col = torch.tensor([0, 2, 1])\n", " [0, 2, 1]])\n",
"val = torch.tensor([4., 5., 6.])\n", "val = torch.tensor([4., 5., 6.])\n",
"A2 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A2 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A2:\")\n", "print(\"A2:\")\n",
"print(A2.to_dense())\n", "print(A2.to_dense())\n",
"\n", "\n",
...@@ -524,9 +532,9 @@ ...@@ -524,9 +532,9 @@
"\n", "\n",
"A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n", "A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n",
"----------------|---------------|----------------|----------\n", "----------------|---------------|----------------|----------\n",
"**DiagMatrix** | | |🚫\n", "**DiagMatrix** |✔️ |✔️ |\n",
"**SparseMatrix**| | |🚫\n", "**SparseMatrix**|✔️ |✔️ |\n",
"**scalar** |🚫 |🚫 |🚫" "**scalar** | | |"
], ],
"metadata": { "metadata": {
"id": "i25N0JHUTUX9" "id": "i25N0JHUTUX9"
...@@ -535,17 +543,17 @@ ...@@ -535,17 +543,17 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A1 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A1 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A1:\")\n", "print(\"A1:\")\n",
"print(A1.to_dense())\n", "print(A1.to_dense())\n",
"\n", "\n",
"row = torch.tensor([0, 1, 2])\n", "i = torch.tensor([[0, 1, 2],\n",
"col = torch.tensor([0, 2, 1])\n", " [0, 2, 1]])\n",
"val = torch.tensor([4., 5., 6.])\n", "val = torch.tensor([4., 5., 6.])\n",
"A2 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A2 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A2:\")\n", "print(\"A2:\")\n",
"print(A2.to_dense())\n", "print(A2.to_dense())\n",
"\n", "\n",
...@@ -586,9 +594,9 @@ ...@@ -586,9 +594,9 @@
"\n", "\n",
"A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n", "A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n",
"----------------|---------------|----------------|----------\n", "----------------|---------------|----------------|----------\n",
"**DiagMatrix** | |🚫 |\n", "**DiagMatrix** |✔️ | |✔️\n",
"**SparseMatrix**|🚫 |🚫 |\n", "**SparseMatrix**| | |✔️\n",
"**scalar** | | |🚫" "**scalar** |✔️ |✔️ |"
], ],
"metadata": { "metadata": {
"id": "bg45jnq8T9EJ" "id": "bg45jnq8T9EJ"
...@@ -597,10 +605,10 @@ ...@@ -597,10 +605,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -642,9 +650,9 @@ ...@@ -642,9 +650,9 @@
"\n", "\n",
"A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n", "A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n",
"----------------|---------------|----------------|----------\n", "----------------|---------------|----------------|----------\n",
"**DiagMatrix** | |🚫 |\n", "**DiagMatrix** |✔️ | |✔️\n",
"**SparseMatrix**|🚫 |🚫 |\n", "**SparseMatrix**| | |✔️\n",
"**scalar** |🚫 |🚫 |🚫" "**scalar** | | |"
], ],
"metadata": { "metadata": {
"id": "Xb2RU6H4UBCs" "id": "Xb2RU6H4UBCs"
...@@ -653,10 +661,10 @@ ...@@ -653,10 +661,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -694,9 +702,9 @@ ...@@ -694,9 +702,9 @@
"\n", "\n",
"A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n", "A \\\\ B | **DiagMatrix**|**SparseMatrix**|**scalar**\n",
"----------------|---------------|----------------|----------\n", "----------------|---------------|----------------|----------\n",
"**DiagMatrix** |🚫 |🚫 |\n", "**DiagMatrix** | | |✔️\n",
"**SparseMatrix**|🚫 |🚫 |\n", "**SparseMatrix**| | |✔️\n",
"**scalar** |🚫 |🚫 |🚫" "**scalar** | | |"
], ],
"metadata": { "metadata": {
"id": "2lZbyTYUUgSi" "id": "2lZbyTYUUgSi"
...@@ -705,10 +713,10 @@ ...@@ -705,10 +713,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -743,10 +751,10 @@ ...@@ -743,10 +751,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4.])\n", "val = torch.tensor([1., 2., 3., 4.])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"print(A.T.to_dense())\n", "print(A.T.to_dense())\n",
"print(\"\")\n", "print(\"\")\n",
"\n", "\n",
...@@ -805,10 +813,10 @@ ...@@ -805,10 +813,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4.])\n", "val = torch.tensor([1., 2., 3., 4.])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"print(\"\")\n", "print(\"\")\n",
"\n", "\n",
...@@ -878,9 +886,9 @@ ...@@ -878,9 +886,9 @@
"\n", "\n",
"A \\\\ B | **Tensor**|**DiagMatrix**|**SparseMatrix**\n", "A \\\\ B | **Tensor**|**DiagMatrix**|**SparseMatrix**\n",
"----------------|-----------|--------------|----------\n", "----------------|-----------|--------------|----------\n",
"**Tensor** | |🚫 |🚫\n", "**Tensor** |✔️ | |\n",
"**DiagMatrix** | | |\n", "**DiagMatrix** |✔️ |✔️ |✔️\n",
"**SparseMatrix**| | |" "**SparseMatrix**|✔️ |✔️ |✔️"
], ],
"metadata": { "metadata": {
"id": "THWE30v6WpAk" "id": "THWE30v6WpAk"
...@@ -900,17 +908,17 @@ ...@@ -900,17 +908,17 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A1 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A1 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A1:\")\n", "print(\"A1:\")\n",
"print(A1.to_dense())\n", "print(A1.to_dense())\n",
"\n", "\n",
"row = torch.tensor([0, 1, 2])\n", "i = torch.tensor([[0, 1, 2],\n",
"col = torch.tensor([0, 2, 1])\n", " [0, 2, 1]])\n",
"val = torch.tensor([4., 5., 6.])\n", "val = torch.tensor([4., 5., 6.])\n",
"A2 = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A2 = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A2:\")\n", "print(\"A2:\")\n",
"print(A2.to_dense())\n", "print(A2.to_dense())\n",
"\n", "\n",
...@@ -956,10 +964,10 @@ ...@@ -956,10 +964,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -996,10 +1004,10 @@ ...@@ -996,10 +1004,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([0, 2, 0])\n", " [0, 2, 0]])\n",
"val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n", "val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n",
"A = dglsp.from_coo(row, col, val, shape=(3, 3))\n", "A = dglsp.spmatrix(i, val, shape=(3, 3))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -1036,10 +1044,10 @@ ...@@ -1036,10 +1044,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([2, 3, 3])\n", " [2, 3, 3]])\n",
"val = torch.tensor([1., 2., 3.])\n", "val = torch.tensor([1., 2., 3.])\n",
"A = dglsp.from_coo(row, col, val, (3, 4))\n", "A = dglsp.spmatrix(i, val, (3, 4))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -1072,10 +1080,10 @@ ...@@ -1072,10 +1080,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([1, 1, 2])\n", "i = torch.tensor([[1, 1, 2],\n",
"col = torch.tensor([2, 3, 3])\n", " [2, 3, 3]])\n",
"val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n", "val = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])\n",
"A = dglsp.from_coo(row, col, val, (3, 4))\n", "A = dglsp.spmatrix(i, val, (3, 4))\n",
"print(\"A:\")\n", "print(\"A:\")\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
...@@ -1121,10 +1129,10 @@ ...@@ -1121,10 +1129,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.randn(4)\n", "val = torch.randn(4)\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
"print(\"Apply tanh.\")\n", "print(\"Apply tanh.\")\n",
...@@ -1149,10 +1157,10 @@ ...@@ -1149,10 +1157,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.randn(4)\n", "val = torch.randn(4)\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"print(A.to_dense())\n", "print(A.to_dense())\n",
"\n", "\n",
"print(\"Apply exp.\")\n", "print(\"Apply exp.\")\n",
...@@ -1179,10 +1187,10 @@ ...@@ -1179,10 +1187,10 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 1, 1, 2])\n", "i = torch.tensor([[0, 1, 1, 2],\n",
"col = torch.tensor([1, 0, 2, 0])\n", " [1, 0, 2, 0]])\n",
"val = torch.tensor([1., 2., 3., 4.])\n", "val = torch.tensor([1., 2., 3., 4.])\n",
"A = dglsp.from_coo(row, col, val)\n", "A = dglsp.spmatrix(i, val)\n",
"\n", "\n",
"print(A.softmax())\n", "print(A.softmax())\n",
"print(\"In dense format:\")\n", "print(\"In dense format:\")\n",
...@@ -1220,9 +1228,9 @@ ...@@ -1220,9 +1228,9 @@
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "source": [
"row = torch.tensor([0, 0, 1, 1, 2, 2, 3])\n", "i = torch.tensor([[0, 0, 1, 1, 2, 2, 3],\n",
"col = torch.tensor([1, 3, 2, 5, 3, 5, 4])\n", " [1, 3, 2, 5, 3, 5, 4]])\n",
"asym_A = dglsp.from_coo(row, col, shape=(6, 6))\n", "asym_A = dglsp.spmatrix(i, shape=(6, 6))\n",
"# Step 1: create symmetrical adjacency matrix A from asym_A.\n", "# Step 1: create symmetrical adjacency matrix A from asym_A.\n",
"# A =\n", "# A =\n",
"\n", "\n",
...@@ -1242,4 +1250,4 @@ ...@@ -1242,4 +1250,4 @@
"outputs": [] "outputs": []
} }
] ]
} }
\ No newline at end of file