import dgl.function as fn
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch.glob import AvgPooling
from layers import GENConv
from ogb.graphproppred.mol_encoder import AtomEncoder


class DeeperGCN(nn.Module):
    r"""

    Description
    -----------
    Introduced in "DeeperGCN: All You Need to Train Deeper GCNs"
    <https://arxiv.org/abs/2006.07739>

    Parameters
    ----------
    node_feat_dim: int
        Size of node feature.
    edge_feat_dim: int
        Size of edge feature.
    hid_dim: int
        Size of hidden representations.
    out_dim: int
        Size of output.
    num_layers: int
        Number of graph convolutional layers.
    dropout: float
        Dropout rate. Default is 0.
    beta: float
        A continuous variable called an inverse temperature. Default is 1.0.
    learn_beta: bool
        Whether beta is a learnable weight. Default is False.
    aggr: str
        Type of aggregation. Default is 'softmax'.
    mlp_layers: int
        Number of MLP layers in message normalization. Default is 1.
    """

    def __init__(
        self,
        node_feat_dim,
        edge_feat_dim,
        hid_dim,
        out_dim,
        num_layers,
        dropout=0.0,
        beta=1.0,
        learn_beta=False,
        aggr="softmax",
        mlp_layers=1,
    ):
        super(DeeperGCN, self).__init__()

        self.num_layers = num_layers
        self.dropout = dropout
        self.gcns = nn.ModuleList()
        self.norms = nn.ModuleList()

        for _ in range(self.num_layers):
            conv = GENConv(
                in_dim=hid_dim,
                out_dim=hid_dim,
                aggregator=aggr,
                beta=beta,
                learn_beta=learn_beta,
                mlp_layers=mlp_layers,
            )
            self.gcns.append(conv)
            self.norms.append(nn.BatchNorm1d(hid_dim, affine=True))

        self.node_encoder = AtomEncoder(hid_dim)
        self.pooling = AvgPooling()
        self.output = nn.Linear(hid_dim, out_dim)

    def forward(self, g, edge_feats, node_feats=None):
        with g.local_scope():
            # Embed raw integer atom features into the hidden dimension.
            hv = self.node_encoder(node_feats)
            he = edge_feats

            for layer in range(self.num_layers):
                # Pre-activation residual block: norm -> ReLU -> dropout ->
                # conv, followed by a skip connection from the block input.
                hv1 = self.norms[layer](hv)
                hv1 = F.relu(hv1)
                hv1 = F.dropout(hv1, p=self.dropout, training=self.training)
                hv = self.gcns[layer](g, hv1, he) + hv

            # Mean-pool node representations into a graph representation.
            h_g = self.pooling(g, hv)

            return self.output(h_g)
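

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module).
    # It assumes `layers.GENConv` follows the DGL DeeperGCN example, where
    # raw OGB integer bond features are encoded inside GENConv, and that
    # node features are raw OGB integer atom features for AtomEncoder.
    import dgl
    import torch

    # Toy 4-node ring with OGB-style integer features: 9 atom-feature
    # columns and 3 bond-feature columns, as in the ogbg-mol* datasets.
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
    node_feats = torch.zeros(4, 9, dtype=torch.long)
    edge_feats = torch.zeros(4, 3, dtype=torch.long)

    model = DeeperGCN(
        node_feat_dim=9,
        edge_feat_dim=3,
        hid_dim=256,
        out_dim=1,  # e.g. a single-logit binary task such as ogbg-molhiv
        num_layers=7,
        dropout=0.2,
    )
    model.eval()  # use running BatchNorm stats and disable dropout

    with torch.no_grad():
        logits = model(g, edge_feats, node_feats)
    print(logits.shape)  # torch.Size([1, 1]): one graph, one output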