Unverified commit 6c23fba8 authored by Quan (Andy) Gan, committed by GitHub

[Doc][Org] move edge_softmax to functional (#2442)



* move edge_softmax to functional

* change examples

* fixes

* revert

* fix

* fix

* remove

* fix

* test

* add init
Co-authored-by: Minjie Wang <wmjlyjemaine@gmail.com>
parent a613ad88
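
In short, this commit gives edge_softmax a framework-agnostic home under dgl.nn.functional. A minimal before/after import sketch (the old locations keep working, since the diff below leaves re-exports in place):

# Old import paths (both still resolve after this change via re-exports):
#   from dgl.ops import edge_softmax
#   from dgl.nn.pytorch import edge_softmax

# New documented, framework-agnostic location:
from dgl.nn.functional import edge_softmax
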
@@ -236,20 +236,6 @@ The following is an example showing how GSDDMM works:
 Like GSpMM, GSDDMM operators support both homogeneous and bipartite graphs.
 
-Edge Softmax module
--------------------
-
-DGL also provides a framework-agnostic edge softmax module, which is frequently used in
-GNN-like structures, e.g.
-`Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`_,
-`Transformer <https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_,
-`Capsule <https://arxiv.org/pdf/1710.09829.pdf>`_, etc.
-
-.. autosummary::
-    :toctree: ../../generated/
-
-    edge_softmax
-
 Segment Reduce Module
 ---------------------
......
@@ -10,3 +10,20 @@ dgl.nn
     nn.pytorch
     nn.mxnet
     nn.tensorflow
+
+dgl.nn.functional
+=================
+
+Edge Softmax module
+-------------------
+
+We also provide a framework-agnostic edge softmax module, which is frequently used in
+GNN-like structures, e.g.
+`Graph Attention Network <https://arxiv.org/pdf/1710.10903.pdf>`_,
+`Transformer <https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_,
+`Capsule <https://arxiv.org/pdf/1710.09829.pdf>`_, etc.
+
+.. autosummary::
+    :toctree: ../../generated/
+
+    nn.functional.edge_softmax
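
For reference, a minimal usage sketch of the function at its new location. The toy graph and random logits are illustrative assumptions, not part of this change; only the import path dgl.nn.functional.edge_softmax comes from this PR.

import dgl
import torch
from dgl.nn.functional import edge_softmax

# A toy graph with 4 nodes and 6 edges.
g = dgl.graph(([0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]))
# One unnormalized attention score per edge.
logits = torch.randn(g.num_edges(), 1)
# Softmax is taken over the incoming edges of each destination node,
# as in GAT-style attention; the result has the same shape as `logits`.
attn = edge_softmax(g, logits)
g.edata['a'] = attn  # each node's in-edge weights now sum to 1
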
@@ -10,7 +10,7 @@ Pytorch implementation: https://github.com/Diego999/pyGAT
 import torch
 import torch.nn as nn
 import dgl.function as fn
-from dgl.nn.pytorch import edge_softmax, GATConv
+from dgl.nn import GATConv
 
 class GAT(nn.Module):
......
 import torch
 import math
 import dgl.function as fn
-from dgl.nn.pytorch import edge_softmax
+from dgl.nn.functional import edge_softmax
 from utlis import *
 from torch import nn
 import torch.nn.functional as F
......
@@ -4,7 +4,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import dgl.function as fn
-from dgl.ops import edge_softmax
+from dgl.nn.functional import edge_softmax
 
 class HGTLayer(nn.Module):
     def __init__(self,
......
@@ -4,7 +4,7 @@ import torch.nn as nn
 from dgl import function as fn
 from dgl._ffi.base import DGLError
 from dgl.nn.pytorch.utils import Identity
-from dgl.ops import edge_softmax
+from dgl.nn.functional import edge_softmax
 from dgl.utils import expand_as_pair
......
@@ -10,7 +10,7 @@ Pytorch implementation: https://github.com/Diego999/pyGAT
 import tensorflow as tf
 from tensorflow.keras import layers
 import dgl.function as fn
-from dgl.nn.tensorflow import edge_softmax, GATConv
+from dgl.nn import GATConv
 
 class GAT(tf.keras.Model):
......
@@ -16,6 +16,11 @@ with "[NN] XXX module".
 import importlib
 import sys
 import os
+
+# [BarclayII] Not sure what's going on with pylint.
+# Possible issue: https://github.com/PyCQA/pylint/issues/2648
+from . import functional  # pylint: disable=import-self
+
 from ..backend import backend_name
 from ..utils import expand_as_pair
......
"""Functions related to DGL NN Modules."""
from ...ops import edge_softmax
@@ -4,7 +4,7 @@ import mxnet as mx
 from mxnet.gluon import nn
 from .... import function as fn
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ..utils import normalize
 from ....base import DGLError
 from ....utils import expand_as_pair
......
@@ -7,7 +7,7 @@ from mxnet.gluon.contrib.nn import Identity
 from .... import function as fn
 from ....base import DGLError
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ....utils import expand_as_pair
 
 #pylint: enable=W0235
......
"""Gluon layer for graph related softmax."""
# pylint: disable= unused-import
from ...ops import edge_softmax
from ..functional import edge_softmax
@@ -5,7 +5,7 @@ from torch import nn
 from torch.nn import functional as F
 from .... import function as fn
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ....base import DGLError
 from ....utils import expand_as_pair
......
@@ -3,7 +3,7 @@
 from torch import nn
 from .... import function as fn
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ....base import DGLError
 from ....utils import expand_as_pair
......
@@ -4,7 +4,7 @@ import torch as th
 from torch import nn
 from .... import function as fn
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ....base import DGLError
 from ..utils import Identity
 from ....utils import expand_as_pair
......
"""Torch modules for graph related softmax."""
# pylint: disable= unused-import
from ...ops import edge_softmax
from ..functional import edge_softmax
@@ -6,7 +6,7 @@ import numpy as np
 from .... import function as fn
 from ....base import DGLError
-from ....ops import edge_softmax
+from ...functional import edge_softmax
 from ..utils import Identity
 
 # pylint: enable=W0235
......
"""tf modules for graph related softmax."""
# pylint: disable= unused-import
from ...ops import edge_softmax
from ..functional import edge_softmax