# model.py
import torch as th
import torch.nn as nn

class BaseRGCN(nn.Module):
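    """Skeleton RGCN model.

    Concrete models subclass this and override the ``build_*`` hooks to
    supply the actual input, hidden and output layers.
    """
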
    def __init__(self, num_nodes, h_dim, out_dim, num_rels, num_bases,
                 num_hidden_layers=1, dropout=0,
                 use_self_loop=False, use_cuda=False):
        super(BaseRGCN, self).__init__()
        self.num_nodes = num_nodes
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.num_rels = num_rels
        self.num_bases = None if num_bases < 0 else num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop
        self.use_cuda = use_cuda

        # create rgcn layers
        self.build_model()

    def build_model(self):
        self.layers = nn.ModuleList()
        # i2h
        i2h = self.build_input_layer()
        if i2h is not None:
            self.layers.append(i2h)
        # h2h
        for idx in range(self.num_hidden_layers):
            h2h = self.build_hidden_layer(idx)
            self.layers.append(h2h)
        # h2o
        h2o = self.build_output_layer()
        if h2o is not None:
            self.layers.append(h2o)

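    # The build_* hooks below are meant to be overridden by concrete models;
    # returning None means the corresponding layer is simply skipped.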
    def build_input_layer(self):
        return None

    def build_hidden_layer(self, idx):
        raise NotImplementedError

    def build_output_layer(self):
        return None

    def forward(self, g, h, r, norm):
        for layer in self.layers:
            h = layer(g, h, r, norm)
        return h
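

# A minimal sketch (not part of the original file) of how BaseRGCN is meant
# to be subclassed: only the build_* hooks need to be overridden. It assumes
# DGL's RelGraphConv layer, whose forward signature matches the
# `layer(g, h, r, norm)` call in BaseRGCN.forward above.
import torch.nn.functional as F
from dgl.nn.pytorch import RelGraphConv

class EntityClassifySketch(BaseRGCN):
    def build_hidden_layer(self, idx):
        # ReLU on every hidden layer except the last
        act = F.relu if idx < self.num_hidden_layers - 1 else None
        return RelGraphConv(self.h_dim, self.h_dim, self.num_rels,
                            "basis", self.num_bases, activation=act,
                            self_loop=self.use_self_loop,
                            dropout=self.dropout)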

class RelGraphEmbedLayer(nn.Module):
    r"""Embedding layer for featureless heterograph.
    Parameters
    ----------
    dev_id : int
        Device to run the layer.
    num_nodes : int
        Number of nodes.
    node_tides : tensor
        Storing the node type id for each node starting from 0
    num_of_ntype : int
        Number of node types
    input_size : list of int
        A list of input feature size for each node type. If None, we then 
        treat certain input feature as an one-hot encoding feature.
    embed_size : int
        Output embed size
    embed_name : str, optional
        Embed name
    """
    def __init__(self,
                 dev_id,
                 num_nodes,
                 node_tids,
                 num_of_ntype,
                 input_size,
                 embed_size,
                 sparse_emb=False,
                 embed_name='embed'):
        super(RelGraphEmbedLayer, self).__init__()
        self.dev_id = dev_id
        self.embed_size = embed_size
        self.embed_name = embed_name
        self.num_nodes = num_nodes
        self.sparse_emb = sparse_emb

        # create a projection weight for each node type that has input features
        self.embeds = nn.ParameterDict()
        self.num_of_ntype = num_of_ntype
        self.idmap = th.empty(num_nodes).long()

        for ntype in range(num_of_ntype):
            if input_size[ntype] is not None:
                # projection matrix mapping the raw input features of this
                # node type (input_size[ntype] dimensions) to embed_size
                input_emb_size = input_size[ntype]
                embed = nn.Parameter(th.Tensor(input_emb_size, self.embed_size))
                nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain('relu'))
                self.embeds[str(ntype)] = embed

        self.node_embeds = nn.Embedding(node_tids.shape[0], self.embed_size, sparse=self.sparse_emb)
        nn.init.uniform_(self.node_embeds.weight, -1.0, 1.0)

    def forward(self, node_ids, node_tids, features):
        """Forward computation
        Parameters
        ----------
        node_ids : tensor
            node ids to generate embedding for.
        node_ids : tensor
            node type ids
        features : list of features
            list of initial features for nodes belong to different node type.
            If None, the corresponding features is an one-hot encoding feature,
            else use the features directly as input feature and matmul a 
            projection matrix.
        Returns
        -------
        tensor
            embeddings as the input of the next layer
        """
        tsd_idx = node_ids < self.num_nodes
        tsd_ids = node_ids[tsd_idx]
        embeds = self.node_embeds(tsd_ids)
        for ntype in range(self.num_of_ntype):
            if features[ntype] is not None:
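                # this node type carries real input features: overwrite the
                # learned embeddings with projected features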
                loc = node_tids == ntype
                embeds[loc] = features[ntype] @ self.embeds[str(ntype)]

        return embeds.to(self.dev_id)
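

if __name__ == '__main__':
    # A hypothetical smoke test (not part of the original file): embed a tiny
    # graph with two node types, where type 0 carries 5-dimensional input
    # features and type 1 is featureless. Tensor.to() accepts both a CUDA
    # device ordinal and the string 'cpu', so this runs with or without a GPU.
    dev_id = 0 if th.cuda.is_available() else 'cpu'
    node_tids = th.tensor([0, 0, 1, 1, 1])
    embed_layer = RelGraphEmbedLayer(dev_id, num_nodes=5,
                                     node_tids=node_tids, num_of_ntype=2,
                                     input_size=[5, None], embed_size=16)
    feats = [th.randn(2, 5), None]
    h = embed_layer(th.arange(5), node_tids, feats)
    print(h.shape)  # expected: torch.Size([5, 16])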