model_utils.py
import torch as th
from torch.autograd import Function

def batch2tensor(batch_adj, batch_feat, node_per_pool_graph):
    """
    Transform a batched graph into a batched adjacency tensor and a batched node feature tensor.
    """
    batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
    adj_list = []
    feat_list = []
    for i in range(batch_size):
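        # Slice out the i-th pooled graph's adjacency block and its feature rows.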
        start = i * node_per_pool_graph
        end = (i + 1) * node_per_pool_graph
        adj_list.append(batch_adj[start:end, start:end])
        feat_list.append(batch_feat[start:end, :])
    adj_list = [th.unsqueeze(x, 0) for x in adj_list]
    feat_list = [th.unsqueeze(x, 0) for x in feat_list]
    adj = th.cat(adj_list, dim=0)
    feat = th.cat(feat_list, dim=0)

    return feat, adj
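
# The demo below is a minimal sketch added for illustration and is not part of the
# original module. It assumes two pooled graphs of 3 nodes each, batched as a
# block-diagonal 6x6 adjacency matrix with stacked 6x4 node features, and checks
# the shapes returned by batch2tensor. The helper name _demo_batch2tensor is ours.
def _demo_batch2tensor():
    node_per_pool_graph = 3
    batch_adj = th.zeros(6, 6)
    batch_adj[:3, :3] = th.ones(3, 3)  # block for graph 0
    batch_adj[3:, 3:] = th.eye(3)      # block for graph 1
    batch_feat = th.arange(24, dtype=th.float32).view(6, 4)
    feat, adj = batch2tensor(batch_adj, batch_feat, node_per_pool_graph)
    assert adj.shape == (2, 3, 3)   # (batch_size, nodes_per_graph, nodes_per_graph)
    assert feat.shape == (2, 3, 4)  # (batch_size, nodes_per_graph, feat_dim)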


def masked_softmax(
    matrix, mask, dim=-1, memory_efficient=True, mask_fill_value=-1e32
):
    """
    Masked softmax for a DGL batched graph.
    Code snippet contributed by AllenNLP (https://github.com/allenai/allennlp).
    """
    if mask is None:
        result = th.nn.functional.softmax(matrix, dim=dim)
    else:
        mask = mask.float()
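        # Add singleton dimensions until the mask broadcasts against the matrix.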
        while mask.dim() < matrix.dim():
            mask = mask.unsqueeze(1)
        if not memory_efficient:
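            # Zero out masked scores before the softmax, then mask and renormalize the probabilities.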
            result = th.nn.functional.softmax(matrix * mask, dim=dim)
            result = result * mask
            result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
        else:
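            # Fill masked positions with a very negative value so softmax assigns them ~0 probability.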
            masked_matrix = matrix.masked_fill(
                (1 - mask).bool(), mask_fill_value
            )
            result = th.nn.functional.softmax(masked_matrix, dim=dim)
    return result
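
# The demo below is a minimal sketch added for illustration and is not part of the
# original module. It applies masked_softmax to a hypothetical 2x4 score matrix
# whose second row has two padded positions, checking that masked entries get
# (near-)zero probability while every row still sums to 1.
def _demo_masked_softmax():
    scores = th.tensor([[1.0, 2.0, 3.0, 4.0],
                        [5.0, 1.0, 0.0, 0.0]])
    mask = th.tensor([[1.0, 1.0, 1.0, 1.0],
                      [1.0, 1.0, 0.0, 0.0]])
    probs = masked_softmax(scores, mask, dim=-1)
    assert th.allclose(probs.sum(dim=-1), th.ones(2))
    assert probs[1, 2:].max().item() < 1e-6


if __name__ == "__main__":
    # Run the illustrative demos when the module is executed directly.
    _demo_batch2tensor()
    _demo_masked_softmax()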