encoder.py
import torch
import torch.nn as nn
from torch.nn import init

import dgl

from .dgl_layers import GraphSageLayer, DiffPoolBatchedGraphLayer
from .tensorized_layers import BatchedGraphSAGE, BatchedDiffPool
from .model_utils import batch2tensor


class DiffPool(nn.Module):
    """
    DiffPool Fuse
    """
21
22
23
24
25

    def __init__(self, input_dim, hidden_dim, embedding_dim,
                 label_dim, activation, n_layers, dropout,
                 n_pooling, linkpred, batch_size, aggregator_type,
                 assign_dim, pool_ratio, cat=False):
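        """
        Args (descriptions inferred from how the arguments are used below):
            input_dim: size of the input node features.
            hidden_dim: hidden size of every GraphSAGE layer.
            embedding_dim: output size of each GraphSAGE block.
            label_dim: number of graph classes to predict.
            activation: activation callable passed to the GraphSAGE layers.
            n_layers: GraphSAGE layers per block (must be >= 3).
            dropout: dropout rate inside the GraphSAGE layers.
            n_pooling: number of DiffPool operations.
            linkpred: enable the auxiliary link-prediction loss.
            batch_size: number of graphs per batch.
            aggregator_type: GraphSAGE aggregator type.
            assign_dim: number of clusters after the first pooling
                (counted over the whole batched graph).
            pool_ratio: factor by which assign_dim shrinks at each
                subsequent pooling.
            cat: if True, concatenate intermediate layer embeddings.
        """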
        super(DiffPool, self).__init__()
        self.link_pred = linkpred
        self.concat = cat
        self.n_pooling = n_pooling
        self.batch_size = batch_size
        self.link_pred_loss = []
        self.entropy_loss = []

        # list of GNN modules before the first diffpool operation
        self.gc_before_pool = nn.ModuleList()
        self.diffpool_layers = nn.ModuleList()

        # list of list of GNN modules, each list after one diffpool operation
        self.gc_after_pool = nn.ModuleList()
        self.assign_dim = assign_dim
        self.bn = True
        self.num_aggs = 1

        # constructing layers
        # layers before diffpool
        assert n_layers >= 3, "n_layers too few"
        self.gc_before_pool.append(
            GraphSageLayer(
                input_dim,
                hidden_dim,
                activation,
                dropout,
                aggregator_type,
                self.bn))
        for _ in range(n_layers - 2):
            self.gc_before_pool.append(
                GraphSageLayer(
                    hidden_dim,
                    hidden_dim,
                    activation,
                    dropout,
                    aggregator_type,
                    self.bn))
        self.gc_before_pool.append(
            GraphSageLayer(
                hidden_dim,
                embedding_dim,
                None,
                dropout,
                aggregator_type))

        assign_dims = []
        assign_dims.append(self.assign_dim)
        if self.concat:
            # the diffpool layer receives a pool_embedding_dim node feature
            # tensor and returns a pool_embedding_dim node embedding
            pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
        else:
            pool_embedding_dim = embedding_dim

        self.first_diffpool_layer = DiffPoolBatchedGraphLayer(
            pool_embedding_dim,
            self.assign_dim,
            hidden_dim,
            activation,
            dropout,
            aggregator_type,
            self.link_pred)
        gc_after_per_pool = nn.ModuleList()
        for _ in range(n_layers - 1):
            gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))
        gc_after_per_pool.append(BatchedGraphSAGE(hidden_dim, embedding_dim))
        self.gc_after_pool.append(gc_after_per_pool)

        self.assign_dim = int(self.assign_dim * pool_ratio)
        # remaining pooling modules (each operates on dense, batched tensors)
        for _ in range(n_pooling - 1):
            self.diffpool_layers.append(
                BatchedDiffPool(
                    pool_embedding_dim,
                    self.assign_dim,
                    hidden_dim,
                    self.link_pred))
            gc_after_per_pool = nn.ModuleList()
            for _ in range(n_layers - 1):
                gc_after_per_pool.append(
                    BatchedGraphSAGE(
                        hidden_dim, hidden_dim))
            gc_after_per_pool.append(
                BatchedGraphSAGE(
                    hidden_dim, embedding_dim))
            self.gc_after_pool.append(gc_after_per_pool)
            assign_dims.append(self.assign_dim)
            self.assign_dim = int(self.assign_dim * pool_ratio)

        # prediction layer
        if self.concat:
            self.pred_input_dim = pool_embedding_dim * \
                self.num_aggs * (n_pooling + 1)
        else:
            self.pred_input_dim = embedding_dim * self.num_aggs
        self.pred_layer = nn.Linear(self.pred_input_dim, label_dim)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.xavier_uniform_(
                    m.weight, gain=init.calculate_gain('relu'))
                if m.bias is not None:
                    init.constant_(m.bias, 0.0)

    def gcn_forward(self, g, h, gc_layers, cat=False):
        """
        Run `gc_layers` on the DGL graph `g` starting from node features `h`.
        Return the last layer's embedding, or the concatenation of every
        layer's embedding along the feature dimension when `cat` is True.
        """
        block_readout = []
        for gc_layer in gc_layers[:-1]:
            h = gc_layer(g, h)
            block_readout.append(h)
        h = gc_layers[-1](g, h)
        block_readout.append(h)
        if cat:
            block = torch.cat(block_readout, dim=1)  # N x F, F = F1 + F2 + ...
        else:
            block = h
        return block

    def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
        """
        Same as `gcn_forward`, but on the dense representation used after
        pooling: `h` is a (batch, nodes, feat) tensor and `adj` the batched
        dense adjacency matrices.
        """
        block_readout = []
        for gc_layer in gc_layers:
            h = gc_layer(h, adj)
            block_readout.append(h)
        if cat:
            block = torch.cat(block_readout, dim=2)  # B x N x F, F = F1 + F2 + ...
        else:
            block = h
        return block

    def forward(self, g):
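        """
        Forward pass on a batched DGL graph `g` with node features stored in
        g.ndata['feat']; returns class logits of shape (batch_size, label_dim).
        """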
        self.link_pred_loss = []
        self.entropy_loss = []
        h = g.ndata['feat']
        # node feature for assignment matrix computation is the same as the
        # original node feature
        h_a = h

        out_all = []

        # we use GCN blocks to get an embedding first
        g_embedding = self.gcn_forward(g, h, self.gc_before_pool, self.concat)

        g.ndata['h'] = g_embedding

        readout = dgl.sum_nodes(g, 'h')
        out_all.append(readout)
        if self.num_aggs == 2:
            readout = dgl.max_nodes(g, 'h')
            out_all.append(readout)

        # first diffpool layer works on the batched DGL graph: it learns a
        # soft cluster assignment S and coarsens the graph, roughly
        # adj <- S^T A S and h <- S^T Z
        adj, h = self.first_diffpool_layer(g, g_embedding)
        node_per_pool_graph = int(adj.size()[0] / len(g.batch_num_nodes()))

        # split the block-diagonal batched result into per-graph dense
        # tensors: h is (batch, nodes, feat), adj is (batch, nodes, nodes)
        h, adj = batch2tensor(adj, h, node_per_pool_graph)
        h = self.gcn_forward_tensorized(
            h, adj, self.gc_after_pool[0], self.concat)
        readout = torch.sum(h, dim=1)
        out_all.append(readout)
        if self.num_aggs == 2:
            readout, _ = torch.max(h, dim=1)
            out_all.append(readout)

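        # remaining DiffPool + GraphSAGE blocks run on the dense
        # (batch, nodes, feat) representation produced above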
        for i, diffpool_layer in enumerate(self.diffpool_layers):
            h, adj = diffpool_layer(h, adj)
            h = self.gcn_forward_tensorized(
                h, adj, self.gc_after_pool[i + 1], self.concat)
            readout = torch.sum(h, dim=1)
            out_all.append(readout)
            if self.num_aggs == 2:
                readout, _ = torch.max(h, dim=1)
                out_all.append(readout)
        if self.concat or self.num_aggs > 1:
            final_readout = torch.cat(out_all, dim=1)
        else:
            final_readout = readout
        ypred = self.pred_layer(final_readout)
        return ypred

    def loss(self, pred, label):
        """
        Classification loss (cross-entropy over the logits) plus the
        auxiliary losses (e.g. link prediction / assignment entropy)
        recorded in each DiffPool layer's loss_log.
        """
        # softmax + cross-entropy
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        for diffpool_layer in self.diffpool_layers:
            for value in diffpool_layer.loss_log.values():
                loss += value
        return loss
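

# ---------------------------------------------------------------------------
# Minimal usage sketch, illustrating the expected inputs and output shape.
# Assumptions (not verified here): the relative imports above resolve
# (dgl_layers, tensorized_layers, model_utils), `activation` is a callable
# such as F.relu, and 'meanpool' is accepted as aggregator_type by
# GraphSageLayer. All sizes below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn.functional as F

    # two tiny undirected graphs, batched; 16-dimensional node features
    g1 = dgl.graph(([0, 1, 2, 1, 2, 0], [1, 2, 0, 0, 1, 2]), num_nodes=3)
    g2 = dgl.graph(([0, 1, 1, 2], [1, 0, 2, 1]), num_nodes=3)
    bg = dgl.batch([g1, g2])
    bg.ndata['feat'] = torch.randn(bg.num_nodes(), 16)

    model = DiffPool(
        input_dim=16, hidden_dim=32, embedding_dim=32, label_dim=2,
        activation=F.relu, n_layers=3, dropout=0.0, n_pooling=1,
        linkpred=True, batch_size=2, aggregator_type='meanpool',
        assign_dim=4, pool_ratio=0.5, cat=False)

    logits = model(bg)                      # shape: (2, label_dim)
    labels = torch.tensor([0, 1])
    print(model.loss(logits, labels))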