import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init

import dgl

from .dgl_layers import DiffPoolBatchedGraphLayer, GraphSage, GraphSageLayer
from .model_utils import batch2tensor
from .tensorized_layers import BatchedDiffPool, BatchedGraphSAGE

class DiffPool(nn.Module):
    """
    DiffPool Fuse
    """
21

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
    def __init__(
        self,
        input_dim,
        hidden_dim,
        embedding_dim,
        label_dim,
        activation,
        n_layers,
        dropout,
        n_pooling,
        linkpred,
        batch_size,
        aggregator_type,
        assign_dim,
        pool_ratio,
        cat=False,
    ):
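        """
        Builds n_layers GraphSAGE layers before the first pooling step, one
        batched diffpool layer, and (n_pooling - 1) dense diffpool layers.
        assign_dim is the total cluster count across the batch and shrinks by
        pool_ratio after every pooling step; cat=True concatenates all
        intermediate embeddings into the pooling/prediction inputs.
        """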
        super(DiffPool, self).__init__()
        self.link_pred = linkpred
        self.concat = cat
        self.n_pooling = n_pooling
        self.batch_size = batch_size
        self.link_pred_loss = []
        self.entropy_loss = []

        # list of GNN modules before the first diffpool operation
        self.gc_before_pool = nn.ModuleList()
        self.diffpool_layers = nn.ModuleList()

        # list of list of GNN modules, each list after one diffpool operation
        self.gc_after_pool = nn.ModuleList()
        self.assign_dim = assign_dim
        self.bn = True
        self.num_aggs = 1

        # constructing layers
        # layers before diffpool
        assert n_layers >= 3, "n_layers too few"
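        # n_layers GraphSAGE layers in total: one input->hidden layer,
        # (n_layers - 2) hidden->hidden layers, and a final hidden->embedding
        # layer with no activation.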
        self.gc_before_pool.append(
            GraphSageLayer(
                input_dim,
                hidden_dim,
                activation,
                dropout,
                aggregator_type,
                self.bn,
            )
        )
        for _ in range(n_layers - 2):
            self.gc_before_pool.append(
                GraphSageLayer(
                    hidden_dim,
                    hidden_dim,
                    activation,
                    dropout,
                    aggregator_type,
                    self.bn,
                )
            )
        self.gc_before_pool.append(
            GraphSageLayer(
                hidden_dim, embedding_dim, None, dropout, aggregator_type
            )
        )

        assign_dims = []
        assign_dims.append(self.assign_dim)
        if self.concat:
            # the diffpool layer consumes and produces node features of
            # width pool_embedding_dim (all GC layer outputs concatenated)
            pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
        else:
            pool_embedding_dim = embedding_dim
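        # Worked example (illustrative numbers): with n_layers=3,
        # hidden_dim=64, embedding_dim=64 and cat=True, pooled nodes carry
        # 64 * (3 - 1) + 64 = 192 features; with cat=False, just the final
        # 64-dim embedding.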

        self.first_diffpool_layer = DiffPoolBatchedGraphLayer(
            pool_embedding_dim,
            self.assign_dim,
            hidden_dim,
            activation,
            dropout,
            aggregator_type,
            self.link_pred,
        )
        gc_after_per_pool = nn.ModuleList()
        for _ in range(n_layers - 1):
            gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))
        gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, embedding_dim))
        self.gc_after_pool.append(gc_after_per_pool)

        self.assign_dim = int(self.assign_dim * pool_ratio)
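        # e.g. assign_dim=6 over a batch of 2 with pool_ratio=0.5: the first
        # pooling targets 6 clusters in total, the next one int(6 * 0.5) = 3
        # (illustrative numbers).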
        # remaining pooling modules (the first diffpool layer is built above)
        for _ in range(n_pooling - 1):
            self.diffpool_layers.append(
                BatchedDiffPool(
                    pool_embedding_dim,
                    self.assign_dim,
                    hidden_dim,
                    self.link_pred,
                )
            )
            gc_after_per_pool = nn.ModuleList()
            for _ in range(n_layers - 1):
                gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))
            gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, embedding_dim))
            self.gc_after_pool.append(gc_after_per_pool)
            assign_dims.append(self.assign_dim)
            self.assign_dim = int(self.assign_dim * pool_ratio)

        # prediction layer
        if self.concat:
            self.pred_input_dim = (
                pool_embedding_dim * self.num_aggs * (n_pooling + 1)
            )
        else:
            self.pred_input_dim = embedding_dim * self.num_aggs
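        # e.g. cat=True, num_aggs=1, n_pooling=2: one readout before pooling
        # plus one per pooling step gives 3 * pool_embedding_dim classifier
        # inputs (illustrative numbers).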
        self.pred_layer = nn.Linear(self.pred_input_dim, label_dim)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.xavier_uniform_(
                    m.weight, gain=init.calculate_gain("relu")
                )
                if m.bias is not None:
                    init.constant_(m.bias, 0.0)

    def gcn_forward(self, g, h, gc_layers, cat=False):
        """
        Return gc_layer embedding cat.
        """
        block_readout = []
        for gc_layer in gc_layers[:-1]:
            h = gc_layer(g, h)
            block_readout.append(h)
        h = gc_layers[-1](g, h)
        block_readout.append(h)
        if cat:
            block = torch.cat(block_readout, dim=1)  # N x F, F = F1 + F2 + ...
        else:
            block = h
        return block

    def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
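        """
        Dense counterpart of gcn_forward: h is a (batch, nodes, feat) tensor
        and adj a (batch, nodes, nodes) adjacency, so every layer is applied
        as a batched tensor op. If cat is True, concatenate all layer outputs.
        """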
        block_readout = []
        for gc_layer in gc_layers:
            h = gc_layer(h, adj)
            block_readout.append(h)
        if cat:
            block = torch.cat(block_readout, dim=2)  # B x N x F, F = F1 + F2 + ...
        else:
            block = h
        return block

    def forward(self, g):
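        """
        Classify a batched DGLGraph: embed nodes with GraphSAGE, pool with
        diffpool (later steps on dense tensors), read out each stage, and
        return label logits of shape (num_graphs, label_dim).
        """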
        self.link_pred_loss = []
        self.entropy_loss = []
        h = g.ndata["feat"]
        # node features for assignment-matrix computation are the same as the
        # original node features (h_a is currently unused in this forward pass)
        h_a = h

        out_all = []

        # we use GCN blocks to get an embedding first
        g_embedding = self.gcn_forward(g, h, self.gc_before_pool, self.concat)

        g.ndata["h"] = g_embedding

        readout = dgl.sum_nodes(g, "h")
        out_all.append(readout)
        if self.num_aggs == 2:
            readout = dgl.max_nodes(g, "h")
            out_all.append(readout)

        adj, h = self.first_diffpool_layer(g, g_embedding)
        node_per_pool_graph = int(adj.size()[0] / len(g.batch_num_nodes()))
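        # The pooled adjacency comes back block-diagonal over the batch (one
        # block per input graph), so each pooled graph owns an equal share of
        # its rows; batch2tensor splits it into dense per-graph tensors.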

        h, adj = batch2tensor(adj, h, node_per_pool_graph)
        h = self.gcn_forward_tensorized(
            h, adj, self.gc_after_pool[0], self.concat
        )
        readout = torch.sum(h, dim=1)
        out_all.append(readout)
        if self.num_aggs == 2:
            readout, _ = torch.max(h, dim=1)
            out_all.append(readout)

        for i, diffpool_layer in enumerate(self.diffpool_layers):
            h, adj = diffpool_layer(h, adj)
            h = self.gcn_forward_tensorized(
                h, adj, self.gc_after_pool[i + 1], self.concat
            )
            readout = torch.sum(h, dim=1)
            out_all.append(readout)
            if self.num_aggs == 2:
                readout, _ = torch.max(h, dim=1)
                out_all.append(readout)
        if self.concat or self.num_aggs > 1:
            final_readout = torch.cat(out_all, dim=1)
        else:
            final_readout = readout
        ypred = self.pred_layer(final_readout)
        return ypred

    def loss(self, pred, label):
        """
        Softmax cross-entropy classification loss, plus the auxiliary losses
        (e.g. link prediction, entropy) logged by each diffpool layer.
        """
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        for value in self.first_diffpool_layer.loss_log.values():
            loss += value
        for diffpool_layer in self.diffpool_layers:
            for value in diffpool_layer.loss_log.values():
                loss += value
        return loss
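

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the original model. It assumes
    # the sibling dgl_layers / model_utils / tensorized_layers modules are
    # importable (run as a module, e.g. `python -m <package>.encoder`), and
    # the hyperparameters and the "meanpool" aggregator name are illustrative
    # guesses taken from typical diffpool example settings.
    graphs = []
    for _ in range(2):
        g = dgl.rand_graph(10, 40)  # 10 nodes, 40 random edges
        g.ndata["feat"] = torch.randn(10, 16)
        graphs.append(g)
    batched = dgl.batch(graphs)

    model = DiffPool(
        input_dim=16,
        hidden_dim=32,
        embedding_dim=32,
        label_dim=4,
        activation=F.relu,
        n_layers=3,
        dropout=0.0,
        n_pooling=1,
        linkpred=True,
        batch_size=2,
        aggregator_type="meanpool",  # assumed aggregator name
        assign_dim=6,  # total clusters across the batch (3 per graph here)
        pool_ratio=0.5,
    )
    logits = model(batched)
    print(logits.shape)  # expected: torch.Size([2, 4])
    print(model.loss(logits, torch.tensor([0, 1])))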