# gcn.py
from typing import Optional

import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import ModuleList, Linear, BatchNorm1d
from torch_sparse import SparseTensor
from torch_geometric.nn import GCNConv

from torch_geometric_autoscale.models import ScalableGNN


class GCN(ScalableGNN):
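    """GCN backbone for `torch_geometric_autoscale`: a stack of GCNConv layers
    whose intermediate node embeddings are cached in the per-layer histories
    managed by ScalableGNN (see the `push_and_pull` calls in `forward`).
    """
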
    def __init__(self, num_nodes: int, in_channels: int, hidden_channels: int,
                 out_channels: int, num_layers: int, dropout: float = 0.0,
                 drop_input: bool = True, batch_norm: bool = False,
                 residual: bool = False, linear: bool = False,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super(GCN, self).__init__(num_nodes, hidden_channels, num_layers,
                                  pool_size, buffer_size, device)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual
        self.linear = linear

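        # With `linear=True`, dedicated input/output linear layers handle the
        # dimension changes, so that every GCNConv maps hidden -> hidden.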
        self.lins = ModuleList()
        if linear:
            self.lins.append(Linear(in_channels, hidden_channels))
            self.lins.append(Linear(hidden_channels, out_channels))

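        # One GCNConv per layer. `normalize=False`: the adjacency matrix is
        # assumed to be GCN-normalized ahead of time.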
        self.convs = ModuleList()
        for i in range(num_layers):
            in_dim = out_dim = hidden_channels
            if i == 0 and not linear:
                in_dim = in_channels
            if i == num_layers - 1 and not linear:
                out_dim = out_channels
            conv = GCNConv(in_dim, out_dim, normalize=False)
            self.convs.append(conv)

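        # One BatchNorm1d per layer; applied in `forward` only if `batch_norm`.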
        self.bns = ModuleList()
        for i in range(num_layers):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)

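    # `reg_modules`/`nonreg_modules` split the parameters into a group that is
    # meant to receive weight decay and one (the output layer) that is not.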
    @property
    def reg_modules(self):
        if self.linear:
            return ModuleList(list(self.convs) + list(self.bns))
        else:
            return ModuleList(list(self.convs[:-1]) + list(self.bns))

    @property
    def nonreg_modules(self):
        return self.lins if self.linear else self.convs[-1:]

    def reset_parameters(self):
        super(GCN, self).reset_parameters()
        for lin in self.lins:
            lin.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor,
                batch_size: Optional[int] = None,
                n_id: Optional[Tensor] = None, offset: Optional[Tensor] = None,
                count: Optional[Tensor] = None) -> Tensor:

        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)

        if self.linear:
            x = self.lins[0](x).relu_()
            x = F.dropout(x, p=self.dropout, training=self.training)

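        # All but the last layer: convolve, optionally batch-norm and add the
        # residual, then push the freshly computed in-batch embeddings to this
        # layer's history and pull cached embeddings for out-of-batch nodes.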
        for conv, bn, hist in zip(self.convs[:-1], self.bns, self.histories):
            h = conv(x, adj_t)
            if self.batch_norm:
                h = bn(h)
            if self.residual and h.size(-1) == x.size(-1):
                h += x[:h.size(0)]
            x = h.relu_()
            x = self.push_and_pull(hist, x, batch_size, n_id, offset, count)
            x = F.dropout(x, p=self.dropout, training=self.training)

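        # Last layer: no history push/pull; with `linear=True` the result is
        # further normalized, activated, and fed to the output linear layer.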
        h = self.convs[-1](x, adj_t)

        if not self.linear:
            return h

        if self.batch_norm:
            h = self.bns[-1](h)
        if self.residual and h.size(-1) == x.size(-1):
            h += x[:h.size(0)]
        h = h.relu_()
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.lins[1](h)

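    # Per-layer forward pass used for layer-wise inference (driven by
    # ScalableGNN); mirrors one step of `forward` without pushing to histories.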
    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        if layer == 0:
            if self.drop_input:
                x = F.dropout(x, p=self.dropout, training=self.training)
            if self.linear:
                x = self.lins[0](x).relu_()
                x = F.dropout(x, p=self.dropout, training=self.training)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)

        h = self.convs[layer](x, adj_t)

        if layer < self.num_layers - 1 or self.linear:
            if self.batch_norm:
                h = self.bns[layer](h)
            if self.residual and h.size(-1) == x.size(-1):
                h += x[:h.size(0)]
            h = h.relu_()

        if self.linear:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = self.lins[1](h)

        return h
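

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# It builds a small GCN and runs a single full-batch forward pass as a shape
# check. Assumptions: ScalableGNN treats the all-`None` mini-batch arguments
# as a plain full-batch pass, and the unnormalized toy adjacency is acceptable
# here even though `GCNConv(..., normalize=False)` expects a pre-normalized
# matrix in real training.
if __name__ == '__main__':
    num_nodes, in_channels, hidden_channels, out_channels = 6, 16, 32, 3

    model = GCN(num_nodes=num_nodes, in_channels=in_channels,
                hidden_channels=hidden_channels, out_channels=out_channels,
                num_layers=3, dropout=0.5, batch_norm=True, residual=True)
    model.reset_parameters()

    # Toy undirected graph with 6 nodes and a handful of edges.
    row = torch.tensor([0, 1, 1, 2, 3, 4, 0, 5])
    col = torch.tensor([1, 0, 2, 1, 4, 3, 5, 0])
    adj_t = SparseTensor(row=row, col=col, sparse_sizes=(num_nodes, num_nodes))

    x = torch.randn(num_nodes, in_channels)
    out = model(x, adj_t)
    print(out.shape)  # torch.Size([6, 3])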