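"""Dataset loading helpers for full-batch node-level benchmarks.

Each ``get_*`` function returns a ``(data, num_features, num_classes)``
triple with a sparse adjacency matrix (``adj_t``) and, where available,
boolean train/val/test node masks.
"""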
import torch
import torch_geometric.transforms as T
from ogb.nodeproppred import PygNodePropPredDataset
from torch_geometric.data import Batch
from torch_geometric.datasets import (Planetoid, WikiCS, Coauthor, Amazon,
                                      GNNBenchmarkDataset, Yelp, Flickr,
                                      Reddit2, PPI)

from .utils import index2mask, gen_masks


def get_planetoid(root, name):
    dataset = Planetoid(
        f'{root}/Planetoid', name,
        transform=T.Compose([T.NormalizeFeatures(),
                             T.ToSparseTensor()]))

    return dataset[0], dataset.num_features, dataset.num_classes


def get_wikics(root):
    dataset = WikiCS(f'{root}/WIKICS', transform=T.ToSparseTensor())
    data = dataset[0]
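    # The raw WikiCS graph is directed; make the adjacency symmetric: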
    data.adj_t = data.adj_t.to_symmetric()
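    # WikiCS ships a dedicated early-stopping mask; repurpose it as the
    # validation mask and drop the original attribute: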
    data.val_mask = data.stopping_mask
    data.stopping_mask = None
    return data, dataset.num_features, dataset.num_classes


def get_coauthor(root, name):
    dataset = Coauthor(f'{root}/Coauthor', name, transform=T.ToSparseTensor())
    data = dataset[0]
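    # Coauthor has no canonical public split; create reproducible random
    # per-class masks via the repo's gen_masks helper (fixed seed):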
    torch.manual_seed(12345)
    data.train_mask, data.val_mask, data.test_mask = gen_masks(
        data.y, 20, 30, 20)
    return data, dataset.num_features, dataset.num_classes


def get_amazon(root, name):
    dataset = Amazon(f'{root}/Amazon', name, transform=T.ToSparseTensor())
    data = dataset[0]
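    # As for Coauthor, generate seeded random split masks: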
    torch.manual_seed(12345)
    data.train_mask, data.val_mask, data.test_mask = gen_masks(
        data.y, 20, 30, 20)
    return data, dataset.num_features, dataset.num_classes


def get_arxiv(root):
    dataset = PygNodePropPredDataset('ogbn-arxiv', f'{root}/OGB',
                                     pre_transform=T.ToSparseTensor())
    data = dataset[0]

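    # ogbn-arxiv is a directed citation graph; symmetrize the adjacency so
    # messages flow both ways, drop the unused node year, and flatten labels: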
    data.adj_t = data.adj_t.to_symmetric()
    data.node_year = None
    data.y = data.y.view(-1)

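    # Convert OGB's index-based splits into boolean node masks: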
    split_idx = dataset.get_idx_split()
    data.train_mask = index2mask(split_idx['train'], data.num_nodes)
    data.val_mask = index2mask(split_idx['valid'], data.num_nodes)
    data.test_mask = index2mask(split_idx['test'], data.num_nodes)

    return data, dataset.num_features, dataset.num_classes


def get_products(root):
    dataset = PygNodePropPredDataset('ogbn-products', f'{root}/OGB',
                                     pre_transform=T.ToSparseTensor())
    data = dataset[0]

    data.y = data.y.view(-1)

    split_idx = dataset.get_idx_split()
    data.train_mask = index2mask(split_idx['train'], data.num_nodes)
    data.val_mask = index2mask(split_idx['valid'], data.num_nodes)
    data.test_mask = index2mask(split_idx['test'], data.num_nodes)

    return data, dataset.num_features, dataset.num_classes


def get_proteins(root):
    dataset = PygNodePropPredDataset('ogbn-proteins', f'{root}/OGB',
                                     pre_transform=T.ToSparseTensor())
    data = dataset[0]

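    # Multi-label binary targets: drop the species attribute and cast labels
    # to float for BCE-style losses.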
    data.node_species = None
    data.y = data.y.to(torch.float)

    split_idx = dataset.get_idx_split()
    data.train_mask = index2mask(split_idx['train'], data.num_nodes)
    data.val_mask = index2mask(split_idx['valid'], data.num_nodes)
    data.test_mask = index2mask(split_idx['test'], data.num_nodes)

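    # ogbn-proteins is multi-label, so report the number of binary tasks
    # (the last dimension of y, 112) in place of dataset.num_classes.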
    return data, dataset.num_features, data.y.size(-1)


def get_yelp(root):
    dataset = Yelp(f'{root}/YELP', pre_transform=T.ToSparseTensor())
    data = dataset[0]
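    # Standardize node features to zero mean and unit variance: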
    data.x = (data.x - data.x.mean(dim=0)) / data.x.std(dim=0)
    return data, dataset.num_features, dataset.num_classes


def get_flickr(root):
    dataset = Flickr(f'{root}/Flickr', pre_transform=T.ToSparseTensor())
    return dataset[0], dataset.num_features, dataset.num_classes


def get_reddit(root):
    dataset = Reddit2(f'{root}/Reddit2', pre_transform=T.ToSparseTensor())
    data = dataset[0]
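    # Same feature standardization as in get_yelp: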
    data.x = (data.x - data.x.mean(dim=0)) / data.x.std(dim=0)
    return data, dataset.num_features, dataset.num_classes


def get_ppi(root, split='train'):
    dataset = PPI(f'{root}/PPI', split=split, pre_transform=T.ToSparseTensor())

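    # Merge all graphs of the split into one big disconnected graph, drop the
    # batch bookkeeping, and mark every node as belonging to this split: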
    data = Batch.from_data_list(dataset)
    data.batch = None
    data.ptr = None
    data[f'{split}_mask'] = torch.ones(data.num_nodes, dtype=torch.bool)

    return data, dataset.num_features, dataset.num_classes


def get_sbm(root, name):
    dataset = GNNBenchmarkDataset(f'{root}/SBM', name, split='train',
                                  pre_transform=T.ToSparseTensor())

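    # As in get_ppi, collapse the split into a single disconnected graph: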
    data = Batch.from_data_list(dataset)
    data.batch = None
    data.ptr = None

    return data, dataset.num_features, dataset.num_classes


def get_data(root, name):
    if name.lower() in ['cora', 'citeseer', 'pubmed']:
        return get_planetoid(root, name)
    if name.lower() == 'wikics':
        return get_wikics(root)
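    # Strip the 'Coauthor' / 'Amazon' prefix to recover the sub-dataset name
    # (e.g. 'CoauthorCS' -> 'CS', 'AmazonPhoto' -> 'Photo'):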
    if name.lower() in ['coauthorcs', 'coauthorphysics']:
        return get_coauthor(root, name[8:])
    if name.lower() in ['amazoncomputers', 'amazonphoto']:
        return get_amazon(root, name[6:])
    if name.lower() in ['ogbn-arxiv', 'arxiv']:
        return get_arxiv(root)
    if name.lower() in ['ogbn-products', 'products']:
        return get_products(root)
    if name.lower() in ['ogbn-proteins', 'proteins']:
        return get_proteins(root)
    if name.lower() == 'yelp':
        return get_yelp(root)
    if name.lower() == 'flickr':
        return get_flickr(root)
    if name.lower() == 'reddit':
        return get_reddit(root)
    if name.lower() == 'ppi':
        return get_ppi(root)
    if name.lower() in ['cluster', 'pattern']:
        return get_sbm(root, name)
    raise NotImplementedError(f"Unsupported dataset: '{name}'")
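

# Minimal usage sketch (hypothetical root path; assumes the sibling `.utils`
# module is importable and that the Planetoid files can be downloaded):
if __name__ == '__main__':
    data, num_features, num_classes = get_data('/tmp/datasets', 'Cora')
    print(data)
    print(f'num_features={num_features}, num_classes={num_classes}')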