import torch
import torch.nn as nn
import torch.nn.functional as F

from dgl.nn.pytorch import GATConv


class SemanticAttention(nn.Module):
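    """Semantic-level attention used by HAN.

    Given one embedding per meta-path for every node, this module scores
    each meta-path with a small MLP (averaged over all nodes), normalizes
    the scores with a softmax, and returns the weighted sum of the
    meta-path-specific embeddings.

    Shapes: the input z is (N, M, D * K) for N nodes, M meta-paths and a
    D * K dimensional embedding; the output is (N, D * K).
    """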
    def __init__(self, in_size, hidden_size=128):
        super(SemanticAttention, self).__init__()

        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False),
        )

    def forward(self, z):
        w = self.project(z).mean(0)  # (M, 1)
        beta = torch.softmax(w, dim=0)  # (M, 1)
        beta = beta.expand((z.shape[0],) + beta.shape)  # (N, M, 1)

        return (beta * z).sum(1)  # (N, D * K)


class HANLayer(nn.Module):
    """
    HAN layer.

    Arguments
    ---------
    num_meta_paths : number of homogeneous graphs generated from the metapaths.
    in_size : input feature dimension
    out_size : output feature dimension
    layer_num_heads : number of attention heads
    dropout : Dropout probability

    Inputs
    ------
    gs : list[DGLGraph]
        List of graphs
    h : tensor
        Input features

    Outputs
    -------
    tensor
        The output feature
    """

    def __init__(
        self, num_meta_paths, in_size, out_size, layer_num_heads, dropout
    ):
        super(HANLayer, self).__init__()

        # One GAT layer for each meta path based adjacency matrix
        self.gat_layers = nn.ModuleList()
        for i in range(num_meta_paths):
            self.gat_layers.append(
                GATConv(
                    in_size,
                    out_size,
                    layer_num_heads,
                    dropout,
                    dropout,
                    activation=F.elu,
                )
            )
        self.semantic_attention = SemanticAttention(
            in_size=out_size * layer_num_heads
        )
        self.num_meta_paths = num_meta_paths

    def forward(self, gs, h):
        semantic_embeddings = []

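        # Node-level attention: one GAT per meta-path graph, heads concatenated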
        for i, g in enumerate(gs):
            semantic_embeddings.append(self.gat_layers[i](g, h).flatten(1))
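        # Semantic-level attention fuses the per-meta-path embeddings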
        semantic_embeddings = torch.stack(
            semantic_embeddings, dim=1
        )  # (N, M, D * K)

        return self.semantic_attention(semantic_embeddings)  # (N, D * K)


class HAN(nn.Module):
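    """HAN: a stack of HANLayers followed by a linear classifier.

    Arguments
    ---------
    num_meta_paths : number of homogeneous graphs generated from the meta-paths.
    in_size : input feature dimension
    hidden_size : hidden feature dimension of each attention head
    out_size : output (class) dimension
    num_heads : list with the number of attention heads for each HANLayer
    dropout : Dropout probability
    """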
    def __init__(
        self, num_meta_paths, in_size, hidden_size, out_size, num_heads, dropout
    ):
        super(HAN, self).__init__()

        self.layers = nn.ModuleList()
        self.layers.append(
            HANLayer(
                num_meta_paths, in_size, hidden_size, num_heads[0], dropout
            )
        )
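        # Later layers take the concatenated multi-head output
        # (hidden_size * num_heads[l - 1]) of the previous layer as input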
        for l in range(1, len(num_heads)):
            self.layers.append(
                HANLayer(
                    num_meta_paths,
                    hidden_size * num_heads[l - 1],
                    hidden_size,
                    num_heads[l],
                    dropout,
                )
            )
        self.predict = nn.Linear(hidden_size * num_heads[-1], out_size)

    def forward(self, g, h):
        for gnn in self.layers:
            h = gnn(g, h)

        return self.predict(h)
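

# A minimal smoke test (not part of the original example): it builds two
# synthetic meta-path graphs over the same nodes with random features, just
# to illustrate the expected call signature and output shape of HAN.
if __name__ == "__main__":
    import dgl

    num_nodes, in_size = 100, 64
    # One homogeneous graph per meta-path; self-loops avoid zero-in-degree
    # errors inside GATConv.
    gs = [dgl.add_self_loop(dgl.rand_graph(num_nodes, 500)) for _ in range(2)]
    features = torch.randn(num_nodes, in_size)

    model = HAN(
        num_meta_paths=len(gs),
        in_size=in_size,
        hidden_size=8,
        out_size=3,
        num_heads=[8],
        dropout=0.6,
    )
    logits = model(gs, features)
    print(logits.shape)  # torch.Size([100, 3])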