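"""Tests for dgl.data: built-in datasets, CSV datasets, and dataset adapters."""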
import unittest
import backend as F

import numpy as np

import gzip
import tempfile
import os

import pandas as pd
import yaml
import pytest

import dgl
import dgl.data as data
from dgl import DGLError

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_minigc():
    ds = data.MiniGCDataset(16, 10, 20)
    g, l = list(zip(*ds))
    print(g, l)
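    # AddSelfLoop(allow_duplicate=True) adds one self-loop per node, so the
    # transformed graph should have exactly num_nodes() more edges.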
    g1 = ds[0][0]
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.MiniGCDataset(16, 10, 20, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g1.num_edges() == g1.num_nodes()

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gin():
    ds_n_graphs = {
        'MUTAG': 188,
        'IMDBBINARY': 1000,
        'IMDBMULTI': 1500,
        'PROTEINS': 1113,
        'PTC': 344,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for name, n_graphs in ds_n_graphs.items():
        ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False)
        assert len(ds) == n_graphs, (len(ds), name)
        g1 = ds[0][0]
        ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False, transform=transform)
        g2 = ds[0][0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fraud():
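    # The fraud graphs are heterogeneous with three edge types, so adding one
    # self-loop per node per relation grows the edge count by 3 * num_nodes().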
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    g = data.FraudDataset('amazon')[0]
    assert g.num_nodes() == 11944
    num_edges1 = g.num_edges()
    g2 = data.FraudDataset('amazon', transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - num_edges1 == g.num_nodes() * 3

    g = data.FraudAmazonDataset()[0]
    assert g.num_nodes() == 11944
    g2 = data.FraudAmazonDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3

    g = data.FraudYelpDataset()[0]
    assert g.num_nodes() == 45954
    g2 = data.FraudYelpDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fakenews():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    ds = data.FakeNewsDataset('politifact', 'bert')
    assert len(ds) == 314
    g = ds[0][0]
    g2 = data.FakeNewsDataset('politifact', 'bert', transform=transform)[0][0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    ds = data.FakeNewsDataset('gossipcop', 'profile')
    assert len(ds) == 5464
    g = ds[0][0]
    g2 = data.FakeNewsDataset('gossipcop', 'profile', transform=transform)[0][0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_tudataset_regression():
    ds = data.TUDataset('ZINC_test', force_reload=True)
    assert len(ds) == 5000
    g = ds[0][0]

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.TUDataset('ZINC_test', force_reload=True, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_data_hash():
    class HashTestDataset(data.DGLDataset):
        def __init__(self, hash_key=()):
            super(HashTestDataset, self).__init__(
                'hashtest', hash_key=hash_key)

        def _load(self):
            pass

    a = HashTestDataset((True, 0, '1', (1, 2, 3)))
    b = HashTestDataset((True, 0, '1', (1, 2, 3)))
    c = HashTestDataset((True, 0, '1', (1, 2, 4)))
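    # Identical hash keys must produce identical hashes; any difference in the
    # key must change the hash.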
    assert a.hash == b.hash
    assert a.hash != c.hash


@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_citation_graph():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

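    # For each citation graph: check node/edge counts, verify that edges are
    # sorted by destination node, then confirm the self-loop transform adds
    # exactly one edge per node.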
    # cora
    g = data.CoraGraphDataset()[0]
    assert g.num_nodes() == 2708
    assert g.num_edges() == 10556
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Citeseer
    g = data.CiteseerGraphDataset()[0]
    assert g.num_nodes() == 3327
    assert g.num_edges() == 9228
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CiteseerGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Pubmed
    g = data.PubmedGraphDataset()[0]
    assert g.num_nodes() == 19717
    assert g.num_edges() == 88651
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.PubmedGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gnn_benchmark():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

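    # Each benchmark dataset below follows the same pattern: check node/edge
    # counts, verify destination-sorted edges, then apply the self-loop transform.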
    # AmazonCoBuyComputerDataset
    g = data.AmazonCoBuyComputerDataset()[0]
    assert g.num_nodes() == 13752
    assert g.num_edges() == 491722
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyComputerDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # AmazonCoBuyPhotoDataset
    g = data.AmazonCoBuyPhotoDataset()[0]
    assert g.num_nodes() == 7650
    assert g.num_edges() == 238163
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyPhotoDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorPhysicsDataset
    g = data.CoauthorPhysicsDataset()[0]
    assert g.num_nodes() == 34493
    assert g.num_edges() == 495924
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorPhysicsDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorCSDataset
    g = data.CoauthorCSDataset()[0]
    assert g.num_nodes() == 18333
    assert g.num_edges() == 163788
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorCSDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoraFullDataset
    g = data.CoraFullDataset()[0]
    assert g.num_nodes() == 19793
    assert g.num_edges() == 126842
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraFullDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_reddit():
    # RedditDataset
    g = data.RedditDataset()[0]
    assert g.num_nodes() == 232965
    assert g.num_edges() == 114615892
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.RedditDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_extract_archive():
    # gzip
    with tempfile.TemporaryDirectory() as src_dir:
        gz_file = 'gz_archive'
        gz_path = os.path.join(src_dir, gz_file + '.gz')
        content = b"test extract archive gzip"
        with gzip.open(gz_path, 'wb') as f:
            f.write(content)
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(gz_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, gz_file))


def _test_construct_graphs_homo():
    from dgl.data.csv_dataset_base import NodeData, EdgeData, DGLGraphConstructor
    # node_id/src_id/dst_id could be non-sorted, duplicated, non-numeric.
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    num_dup_nodes = int(num_nodes*0.2)
    node_ids = np.random.choice(
        np.arange(num_nodes*2), size=num_nodes, replace=False)
    assert len(node_ids) == num_nodes
    # to be non-sorted
    np.random.shuffle(node_ids)
    # to be duplicated
    node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
    # to be non-numeric
    node_ids = ['id_{}'.format(id) for id in node_ids]
    t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
               'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
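    # Graph construction is expected to keep one feature row per unique node ID
    # (its first occurrence); np.unique(return_index=True) mirrors that here.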
    _, u_indices = np.unique(node_ids, return_index=True)
    ndata = {'feat': t_ndata['feat'][u_indices],
             'label': t_ndata['label'][u_indices]}
    node_data = NodeData(node_ids, t_ndata)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    edata = {'feat': np.random.rand(
        num_edges, num_dims), 'label': np.random.randint(2, size=num_edges)}
    edge_data = EdgeData(src_ids, dst_ids, edata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data)
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == num_nodes
    assert g.num_edges() == num_edges

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.array_equal(F.tensor(value), rhs[key])
    assert_data(ndata, g.ndata)
    assert_data(edata, g.edata)


def _test_construct_graphs_hetero():
    from dgl.data.csv_dataset_base import NodeData, EdgeData, DGLGraphConstructor
    # node_id/src_id/dst_id could be non-sorted, duplicated, non-numeric.
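    # Same recipe as the homogeneous case, repeated once per node type and per
    # canonical edge type.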
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    num_dup_nodes = int(num_nodes*0.2)
    ntypes = ['user', 'item']
    node_data = []
    node_ids_dict = {}
    ndata_dict = {}
    for ntype in ntypes:
        node_ids = np.random.choice(
            np.arange(num_nodes*2), size=num_nodes, replace=False)
        assert len(node_ids) == num_nodes
        # to be non-sorted
        np.random.shuffle(node_ids)
        # to be duplicated
        node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
        # to be non-numeric
        node_ids = ['id_{}'.format(id) for id in node_ids]
        t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
                   'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
        _, u_indices = np.unique(node_ids, return_index=True)
        ndata = {'feat': t_ndata['feat'][u_indices],
                 'label': t_ndata['label'][u_indices]}
        node_data.append(NodeData(node_ids, t_ndata, type=ntype))
        node_ids_dict[ntype] = node_ids
        ndata_dict[ntype] = ndata
    etypes = [('user', 'follow', 'user'), ('user', 'like', 'item')]
    edge_data = []
    edata_dict = {}
    for src_type, e_type, dst_type in etypes:
        src_ids = np.random.choice(node_ids_dict[src_type], size=num_edges)
        dst_ids = np.random.choice(node_ids_dict[dst_type], size=num_edges)
        edata = {'feat': np.random.rand(
            num_edges, num_dims), 'label': np.random.randint(2, size=num_edges)}
        edge_data.append(EdgeData(src_ids, dst_ids, edata,
                         type=(src_type, e_type, dst_type)))
        edata_dict[(src_type, e_type, dst_type)] = edata
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data)
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert not g.is_homogeneous
    assert g.num_nodes() == num_nodes*len(ntypes)
    assert g.num_edges() == num_edges*len(etypes)

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.array_equal(F.tensor(value), rhs[key])
    for ntype in g.ntypes:
        assert g.num_nodes(ntype) == num_nodes
        assert_data(ndata_dict[ntype], g.nodes[ntype].data)
    for etype in g.canonical_etypes:
        assert g.num_edges(etype) == num_edges
        assert_data(edata_dict[etype], g.edges[etype].data)


def _test_construct_graphs_multiple():
    from dgl.data.csv_dataset_base import NodeData, EdgeData, GraphData, DGLGraphConstructor
    num_nodes = 100
    num_edges = 1000
    num_graphs = 10
    num_dims = 3
    node_ids = np.array([], dtype=np.int64)
    src_ids = np.array([], dtype=np.int64)
    dst_ids = np.array([], dtype=np.int64)
    ngraph_ids = np.array([], dtype=np.int64)
    egraph_ids = np.array([], dtype=np.int64)
    u_indices = np.array([], dtype=np.int64)
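    # Accumulate node/edge data for num_graphs graphs; the graph_id arrays
    # assign each row to its owning graph.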
    for i in range(num_graphs):
        l_node_ids = np.random.choice(
            np.arange(num_nodes*2), size=num_nodes, replace=False)
        node_ids = np.append(node_ids, l_node_ids)
        _, l_u_indices = np.unique(l_node_ids, return_index=True)
        u_indices = np.append(u_indices, l_u_indices)
        ngraph_ids = np.append(ngraph_ids, np.full(num_nodes, i))
        src_ids = np.append(src_ids, np.random.choice(
            l_node_ids, size=num_edges))
        dst_ids = np.append(dst_ids, np.random.choice(
            l_node_ids, size=num_edges))
        egraph_ids = np.append(egraph_ids, np.full(num_edges, i))
    ndata = {'feat': np.random.rand(num_nodes*num_graphs, num_dims),
             'label': np.random.randint(2, size=num_nodes*num_graphs)}
    ngraph_ids = ['graph_{}'.format(id) for id in ngraph_ids]
    node_data = NodeData(node_ids, ndata, graph_id=ngraph_ids)
    egraph_ids = ['graph_{}'.format(id) for id in egraph_ids]
    edata = {'feat': np.random.rand(
        num_edges*num_graphs, num_dims), 'label': np.random.randint(2, size=num_edges*num_graphs)}
    edge_data = EdgeData(src_ids, dst_ids, edata, graph_id=egraph_ids)
    gdata = {'feat': np.random.rand(num_graphs, num_dims),
             'label': np.random.randint(2, size=num_graphs)}
    graph_ids = ['graph_{}'.format(id) for id in np.arange(num_graphs)]
    graph_data = GraphData(graph_ids, gdata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data, graph_data)
    assert len(graphs) == num_graphs
    assert len(data_dict) == len(gdata)
    for k, v in data_dict.items():
        assert F.array_equal(F.tensor(gdata[k]), v)
    for i, g in enumerate(graphs):
        assert g.is_homogeneous
        assert g.num_nodes() == num_nodes
        assert g.num_edges() == num_edges

        def assert_data(lhs, rhs, size, node=False):
            for key, value in lhs.items():
                assert key in rhs
                value = value[i*size:(i+1)*size]
                if node:
                    indices = u_indices[i*size:(i+1)*size]
                    value = value[indices]
                assert F.array_equal(F.tensor(value), rhs[key])
        assert_data(ndata, g.ndata, num_nodes, node=True)
        assert_data(edata, g.edata, num_edges)

    # Graph IDs found in node/edge CSV but not in graph CSV
    graph_data = GraphData(np.arange(num_graphs-2), {})
    expect_except = False
    try:
        _, _ = DGLGraphConstructor.construct_graphs(
            node_data, edge_data, graph_data)
    except:
        expect_except = True
    assert expect_except


def _test_DefaultDataParser():
    from dgl.data.csv_dataset_base import DefaultDataParser
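    # DefaultDataParser converts each CSV column into a NumPy array; list-valued
    # cells such as '[0.1, 0.2]' are expected to round-trip back to float vectors.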
    # common csv
    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
        num_nodes = 5
        num_labels = 3
        num_dims = 2
        node_id = np.arange(num_nodes)
        label = np.random.randint(num_labels, size=num_nodes)
        feat = np.random.rand(num_nodes, num_dims)
        df = pd.DataFrame({'node_id': node_id, 'label': label,
                           'feat': [line.tolist() for line in feat],
                           })
        df.to_csv(csv_path, index=False)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)
        dt = dp(df)
        assert np.array_equal(node_id, dt['node_id'])
        assert np.array_equal(label, dt['label'])
        assert np.array_equal(feat, dt['feat'])
    # string consists of non-numeric values
    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
        df = pd.DataFrame({'label': ['a', 'b', 'c'],
                           })
        df.to_csv(csv_path, index=False)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)
        expect_except = False
        try:
            dt = dp(df)
        except:
            expect_except = True
        assert expect_except
    # the CSV has an unnamed index column, which is ignored
    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
        df = pd.DataFrame({'label': [1, 2, 3],
                           })
        df.to_csv(csv_path)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)
        dt = dp(df)
        assert len(dt) == 1


def _test_load_yaml_with_sanity_check():
    from dgl.data.csv_dataset_base import load_yaml_with_sanity_check
    with tempfile.TemporaryDirectory() as test_dir:
        yaml_path = os.path.join(test_dir, 'meta.yaml')
        # workable, though usually meaningless
        yaml_data = {'dataset_name': 'default',
                     'node_data': [], 'edge_data': []}
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert meta.version == '1.0.0'
        assert meta.dataset_name == 'default'
        assert meta.separator == ','
        assert len(meta.node_data) == 0
        assert len(meta.edge_data) == 0
        assert meta.graph_data is None
        # minimum with required fields only
        yaml_data = {'version': '1.0.0', 'dataset_name': 'default', 'node_data': [{'file_name': 'nodes.csv'}],
                     'edge_data': [{'file_name': 'edges.csv'}],
                     }
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        for ndata in meta.node_data:
            assert ndata.file_name == 'nodes.csv'
            assert ndata.ntype == '_V'
            assert ndata.graph_id_field == 'graph_id'
            assert ndata.node_id_field == 'node_id'
        for edata in meta.edge_data:
            assert edata.file_name == 'edges.csv'
            assert edata.etype == ['_V', '_E', '_V']
            assert edata.graph_id_field == 'graph_id'
            assert edata.src_id_field == 'src_id'
            assert edata.dst_id_field == 'dst_id'
        # optional fields are specified
        yaml_data = {'version': '1.0.0', 'dataset_name': 'default',
                     'separator': '|',
                     'node_data': [{'file_name': 'nodes.csv', 'ntype': 'user', 'graph_id_field': 'xxx', 'node_id_field': 'xxx'}],
                     'edge_data': [{'file_name': 'edges.csv', 'etype': ['user', 'follow', 'user'], 'graph_id_field':'xxx', 'src_id_field':'xxx', 'dst_id_field':'xxx'}],
                     'graph_data': {'file_name': 'graph.csv', 'graph_id_field': 'xxx'}
                     }
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert len(meta.node_data) == 1
        ndata = meta.node_data[0]
        assert ndata.ntype == 'user'
        assert ndata.graph_id_field == 'xxx'
        assert ndata.node_id_field == 'xxx'
        assert len(meta.edge_data) == 1
        edata = meta.edge_data[0]
        assert edata.etype == ['user', 'follow', 'user']
        assert edata.graph_id_field == 'xxx'
        assert edata.src_id_field == 'xxx'
        assert edata.dst_id_field == 'xxx'
        assert meta.graph_data is not None
        assert meta.graph_data.file_name == 'graph.csv'
        assert meta.graph_data.graph_id_field == 'xxx'
        # some required fields are missing
        yaml_data = {'dataset_name': 'default',
                     'node_data': [], 'edge_data': []}
        for field in yaml_data.keys():
            ydata = {k: v for k, v in yaml_data.items()}
            ydata.pop(field)
            with open(yaml_path, 'w') as f:
                yaml.dump(ydata, f, sort_keys=False)
            expect_except = False
            try:
                meta = load_yaml_with_sanity_check(yaml_path)
            except:
                expect_except = True
            assert expect_except
        # inapplicable version
        yaml_data = {'version': '0.0.0', 'dataset_name': 'default', 'node_data': [{'file_name': 'nodes_0.csv'}],
                     'edge_data': [{'file_name': 'edges_0.csv'}],
                     }
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except
        # duplicate node types
        yaml_data = {'version': '1.0.0', 'dataset_name': 'default', 'node_data': [{'file_name': 'nodes.csv'}, {'file_name': 'nodes.csv'}],
                     'edge_data': [{'file_name': 'edges.csv'}],
                     }
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except
        # duplicate edge types
        yaml_data = {'version': '1.0.0', 'dataset_name': 'default', 'node_data': [{'file_name': 'nodes.csv'}],
                     'edge_data': [{'file_name': 'edges.csv'}, {'file_name': 'edges.csv'}],
                     }
        with open(yaml_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_node_data_from_csv():
    from dgl.data.csv_dataset_base import MetaNode, NodeData, DefaultDataParser
    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        # minimum
        df = pd.DataFrame({'node_id': np.arange(num_nodes)})
        csv_path = os.path.join(test_dir, 'nodes.csv')
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(
            meta_node, DefaultDataParser())
        assert np.array_equal(df['node_id'], node_data.id)
        assert len(node_data.data) == 0

        # common case
        df = pd.DataFrame({'node_id': np.arange(num_nodes),
                          'label': np.random.randint(3, size=num_nodes)})
        csv_path = os.path.join(test_dir, 'nodes.csv')
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(
            meta_node, DefaultDataParser())
        assert np.array_equal(df['node_id'], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df['label'], node_data.data['label'])
        assert np.array_equal(np.full(num_nodes, 0), node_data.graph_id)
        assert node_data.type == '_V'

        # add more fields into nodes.csv
        df = pd.DataFrame({'node_id': np.arange(num_nodes), 'label': np.random.randint(
            3, size=num_nodes), 'graph_id': np.full(num_nodes, 1)})
        csv_path = os.path.join(test_dir, 'nodes.csv')
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(
            meta_node, DefaultDataParser())
        assert np.array_equal(df['node_id'], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df['label'], node_data.data['label'])
        assert np.array_equal(df['graph_id'], node_data.graph_id)
        assert node_data.type == '_V'

        # required header is missing
        df = pd.DataFrame({'label': np.random.randint(3, size=num_nodes)})
        csv_path = os.path.join(test_dir, 'nodes.csv')
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        expect_except = False
        try:
            NodeData.load_from_csv(
                meta_node, DefaultDataParser())
        except:
            expect_except = True
        assert expect_except


def _test_load_edge_data_from_csv():
    from dgl.data.csv_dataset_base import MetaEdge, EdgeData, DefaultDataParser
    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        num_edges = 1000
        # minimum
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           'dst_id': np.random.randint(num_nodes, size=num_edges),
                           })
        csv_path = os.path.join(test_dir, 'edges.csv')
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(
            meta_edge, DefaultDataParser())
        assert np.array_equal(df['src_id'], edge_data.src)
        assert np.array_equal(df['dst_id'], edge_data.dst)
        assert len(edge_data.data) == 0

        # common case
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           'dst_id': np.random.randint(num_nodes, size=num_edges),
                           'label': np.random.randint(3, size=num_edges)})
        csv_path = os.path.join(test_dir, 'edges.csv')
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(
            meta_edge, DefaultDataParser())
        assert np.array_equal(df['src_id'], edge_data.src)
        assert np.array_equal(df['dst_id'], edge_data.dst)
        assert len(edge_data.data) == 1
        assert np.array_equal(df['label'], edge_data.data['label'])
        assert np.array_equal(np.full(num_edges, 0), edge_data.graph_id)
        assert edge_data.type == ('_V', '_E', '_V')

        # add more fields into edges.csv
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           'dst_id': np.random.randint(num_nodes, size=num_edges),
                           'graph_id': np.arange(num_edges),
                           'feat': np.random.randint(3, size=num_edges),
                           'label': np.random.randint(3, size=num_edges)})
        csv_path = os.path.join(test_dir, 'edges.csv')
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(
            meta_edge, DefaultDataParser())
        assert np.array_equal(df['src_id'], edge_data.src)
        assert np.array_equal(df['dst_id'], edge_data.dst)
        assert len(edge_data.data) == 2
        assert np.array_equal(df['feat'], edge_data.data['feat'])
        assert np.array_equal(df['label'], edge_data.data['label'])
        assert np.array_equal(df['graph_id'], edge_data.graph_id)
        assert edge_data.type == ('_V', '_E', '_V')

        # required headers are missing
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           })
        csv_path = os.path.join(test_dir, 'edges.csv')
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        expect_except = False
        try:
            EdgeData.load_from_csv(
                meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except
        df = pd.DataFrame({'dst_id': np.random.randint(num_nodes, size=num_edges),
                           })
        csv_path = os.path.join(test_dir, 'edges.csv')
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        expect_except = False
        try:
            EdgeData.load_from_csv(
                meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_graph_data_from_csv():
    from dgl.data.csv_dataset_base import MetaGraph, GraphData, DefaultDataParser
    with tempfile.TemporaryDirectory() as test_dir:
        num_graphs = 100
        # minimum
        df = pd.DataFrame({'graph_id': np.arange(num_graphs)})
        csv_path = os.path.join(test_dir, 'graph.csv')
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(
            meta_graph, DefaultDataParser())
        assert np.array_equal(df['graph_id'], graph_data.graph_id)
        assert len(graph_data.data) == 0

        # common case
        df = pd.DataFrame({'graph_id': np.arange(num_graphs),
                          'label': np.random.randint(3, size=num_graphs)})
        csv_path = os.path.join(test_dir, 'graph.csv')
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(
            meta_graph, DefaultDataParser())
        assert np.array_equal(df['graph_id'], graph_data.graph_id)
        assert len(graph_data.data) == 1
        assert np.array_equal(df['label'], graph_data.data['label'])

        # add more fields into graph.csv
        df = pd.DataFrame({'graph_id': np.arange(num_graphs),
                           'feat': np.random.randint(3, size=num_graphs),
                           'label': np.random.randint(3, size=num_graphs)})
        csv_path = os.path.join(test_dir, 'graph.csv')
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(
            meta_graph, DefaultDataParser())
        assert np.array_equal(df['graph_id'], graph_data.graph_id)
        assert len(graph_data.data) == 2
        assert np.array_equal(df['feat'], graph_data.data['feat'])
        assert np.array_equal(df['label'], graph_data.data['label'])

        # required header is missing
        df = pd.DataFrame({'label': np.random.randint(3, size=num_graphs)})
        csv_path = os.path.join(test_dir, 'graph.csv')
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        expect_except = False
        try:
            GraphData.load_from_csv(
                meta_graph, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_CSVDataset_single():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        meta_yaml_data = {'version': '1.0.0', 'dataset_name': 'default_name',
                          'node_data': [{'file_name': os.path.basename(nodes_csv_path_0),
                                         'ntype': 'user',
                                         },
                                        {'file_name': os.path.basename(nodes_csv_path_1),
                                            'ntype': 'item',
                                         }],
                          'edge_data': [{'file_name': os.path.basename(edges_csv_path_0),
                                         'etype': ['user', 'follow', 'user'],
                                         },
                                        {'file_name': os.path.basename(edges_csv_path_1),
                                         'etype': ['user', 'like', 'item'],
                                         }],
                          }
        with open(meta_yaml_path, 'w') as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_dims = 3
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes)
        df = pd.DataFrame({'node_id': np.arange(num_nodes),
                           'label': label_ndata,
                           'feat': [line.tolist() for line in feat_ndata],
                           })
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        feat_edata = np.random.rand(num_edges, num_dims)
        label_edata = np.random.randint(2, size=num_edges)
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           'dst_id': np.random.randint(num_nodes, size=num_edges),
                           'label': label_edata,
                           'feat': [line.tolist() for line in feat_edata],
                           })
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)

        # load CSVDataset
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
            csv_dataset = data.CSVDataset(
                test_dir, force_reload=force_reload)
            assert len(csv_dataset) == 1
            g = csv_dataset[0]
            assert not g.is_homogeneous
            assert csv_dataset.has_cache()
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                assert F.array_equal(F.tensor(feat_ndata),
                                     g.nodes[ntype].data['feat'])
                assert np.array_equal(label_ndata,
                                      F.asnumpy(g.nodes[ntype].data['label']))
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                assert F.array_equal(F.tensor(feat_edata),
                                     g.edges[etype].data['feat'])
                assert np.array_equal(label_edata,
                                      F.asnumpy(g.edges[etype].data['label']))


def _test_CSVDataset_multiple():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
        meta_yaml_data = {'version': '1.0.0', 'dataset_name': 'default_name',
                          'node_data': [{'file_name': os.path.basename(nodes_csv_path_0),
                                         'ntype': 'user',
                                         },
                                        {'file_name': os.path.basename(nodes_csv_path_1),
                                            'ntype': 'item',
                                         }],
                          'edge_data': [{'file_name': os.path.basename(edges_csv_path_0),
                                         'etype': ['user', 'follow', 'user'],
                                         },
                                        {'file_name': os.path.basename(edges_csv_path_1),
                                         'etype': ['user', 'like', 'item'],
                                         }],
                          'graph_data': {'file_name': os.path.basename(graph_csv_path)}
                          }
        with open(meta_yaml_path, 'w') as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        num_dims = 3
        feat_ndata = np.random.rand(num_nodes*num_graphs, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes*num_graphs)
        df = pd.DataFrame({'node_id': np.hstack([np.arange(num_nodes) for _ in range(num_graphs)]),
                           'label': label_ndata,
                           'feat': [line.tolist() for line in feat_ndata],
                           'graph_id': np.hstack([np.full(num_nodes, i) for i in range(num_graphs)])
                           })
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        feat_edata = np.random.rand(num_edges*num_graphs, num_dims)
        label_edata = np.random.randint(2, size=num_edges*num_graphs)
        df = pd.DataFrame({'src_id': np.hstack([np.random.randint(num_nodes, size=num_edges) for _ in range(num_graphs)]),
                           'dst_id': np.hstack([np.random.randint(num_nodes, size=num_edges) for _ in range(num_graphs)]),
                           'label': label_edata,
                           'feat': [line.tolist() for line in feat_edata],
                           'graph_id': np.hstack([np.full(num_edges, i) for i in range(num_graphs)])
                           })
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        feat_gdata = np.random.rand(num_graphs, num_dims)
        label_gdata = np.random.randint(2, size=num_graphs)
        df = pd.DataFrame({'label': label_gdata,
                           'feat': [line.tolist() for line in feat_gdata],
                           'graph_id': np.arange(num_graphs)
                           })
        df.to_csv(graph_csv_path, index=False)

        # load CSVDataset with default node/edge/gdata_parser
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
            csv_dataset = data.CSVDataset(
                test_dir, force_reload=force_reload)
            assert len(csv_dataset) == num_graphs
            assert csv_dataset.has_cache()
            assert len(csv_dataset.data) == 2
            assert 'feat' in csv_dataset.data
            assert 'label' in csv_dataset.data
            assert F.array_equal(F.tensor(feat_gdata),
                                 csv_dataset.data['feat'])
            for i, (g, g_data) in enumerate(csv_dataset):
                assert not g.is_homogeneous
                assert F.asnumpy(g_data['label']) == label_gdata[i]
                assert F.array_equal(g_data['feat'], F.tensor(feat_gdata[i]))
                for ntype in g.ntypes:
                    assert g.num_nodes(ntype) == num_nodes
                    assert F.array_equal(F.tensor(feat_ndata[i*num_nodes:(i+1)*num_nodes]),
                                         g.nodes[ntype].data['feat'])
                    assert np.array_equal(label_ndata[i*num_nodes:(i+1)*num_nodes],
                                          F.asnumpy(g.nodes[ntype].data['label']))
                for etype in g.etypes:
                    assert g.num_edges(etype) == num_edges
                    assert F.array_equal(F.tensor(feat_edata[i*num_edges:(i+1)*num_edges]),
                                         g.edges[etype].data['feat'])
                    assert np.array_equal(label_edata[i*num_edges:(i+1)*num_edges],
                                          F.asnumpy(g.edges[etype].data['label']))


def _test_CSVDataset_customized_data_parser():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
        meta_yaml_data = {'dataset_name': 'default_name',
                          'node_data': [{'file_name': os.path.basename(nodes_csv_path_0),
                                         'ntype': 'user',
                                         },
                                        {'file_name': os.path.basename(nodes_csv_path_1),
                                            'ntype': 'item',
                                         }],
                          'edge_data': [{'file_name': os.path.basename(edges_csv_path_0),
                                         'etype': ['user', 'follow', 'user'],
                                         },
                                        {'file_name': os.path.basename(edges_csv_path_1),
                                         'etype': ['user', 'like', 'item'],
                                         }],
                          'graph_data': {'file_name': os.path.basename(graph_csv_path)}
                          }
        with open(meta_yaml_path, 'w') as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        label_ndata = np.random.randint(2, size=num_nodes*num_graphs)
        df = pd.DataFrame({'node_id': np.hstack([np.arange(num_nodes) for _ in range(num_graphs)]),
                           'label': label_ndata,
                           'graph_id': np.hstack([np.full(num_nodes, i) for i in range(num_graphs)])
                           })
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        label_edata = np.random.randint(2, size=num_edges*num_graphs)
        df = pd.DataFrame({'src_id': np.hstack([np.random.randint(num_nodes, size=num_edges) for _ in range(num_graphs)]),
                           'dst_id': np.hstack([np.random.randint(num_nodes, size=num_edges) for _ in range(num_graphs)]),
                           'label': label_edata,
                           'graph_id': np.hstack([np.full(num_edges, i) for i in range(num_graphs)])
                           })
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        label_gdata = np.random.randint(2, size=num_graphs)
        df = pd.DataFrame({'label': label_gdata,
                           'graph_id': np.arange(num_graphs)
                           })
        df.to_csv(graph_csv_path, index=False)

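        # A parser that shifts the 'label' column by 2; the offset is a marker
        # used below to verify the custom parser was applied to the targeted type.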
        class CustDataParser:
            def __call__(self, df):
                data = {}
                for header in df:
                    dt = df[header].to_numpy().squeeze()
                    if header == 'label':
                        dt += 2
                    data[header] = dt
                return data
        # load CSVDataset with customized node/edge/gdata_parser
        # specify via dict[ntype/etype, callable]
        csv_dataset = data.CSVDataset(
            test_dir, force_reload=True, ndata_parser={'user': CustDataParser()},
            edata_parser={('user', 'like', 'item'): CustDataParser()},
            gdata_parser=CustDataParser())
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert 'label' in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data['label']) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2 if ntype == 'user' else 0
                assert np.array_equal(label_ndata[i*num_nodes:(i+1)*num_nodes]+offset,
                                      F.asnumpy(g.nodes[ntype].data['label']))
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2 if etype == 'like' else 0
                assert np.array_equal(label_edata[i*num_edges:(i+1)*num_edges]+offset,
                                      F.asnumpy(g.edges[etype].data['label']))
        # specify via callable
        csv_dataset = data.CSVDataset(
            test_dir, force_reload=True, ndata_parser=CustDataParser(),
            edata_parser=CustDataParser(), gdata_parser=CustDataParser())
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert 'label' in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data['label']) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2
                assert np.array_equal(label_ndata[i*num_nodes:(i+1)*num_nodes]+offset,
                                      F.asnumpy(g.nodes[ntype].data['label']))
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2
                assert np.array_equal(label_edata[i*num_edges:(i+1)*num_edges]+offset,
                                      F.asnumpy(g.edges[etype].data['label']))


def _test_NodeEdgeGraphData():
    from dgl.data.csv_dataset_base import NodeData, EdgeData, GraphData
    # NodeData basics
    num_nodes = 100
    node_ids = np.arange(num_nodes, dtype=np.float64)
    ndata = NodeData(node_ids, {})
    assert np.array_equal(ndata.id, node_ids)
    assert len(ndata.data) == 0
    assert ndata.type == '_V'
    assert np.array_equal(ndata.graph_id, np.full(num_nodes, 0))
    # NodeData more
    data = {'feat': np.random.rand(num_nodes, 3)}
    graph_id = np.arange(num_nodes)
    ndata = NodeData(node_ids, data, type='user', graph_id=graph_id)
    assert ndata.type == 'user'
    assert np.array_equal(ndata.graph_id, graph_id)
    assert len(ndata.data) == len(data)
    for k, v in data.items():
        assert k in ndata.data
        assert np.array_equal(ndata.data[k], v)
    # NodeData except
    expect_except = False
    try:
        NodeData(np.arange(num_nodes), {'feat': np.random.rand(
            num_nodes+1, 3)}, graph_id=np.arange(num_nodes-1))
    except:
        expect_except = True
    assert expect_except

    # EdgeData basics
    num_nodes = 100
    num_edges = 1000
    src_ids = np.random.randint(num_nodes, size=num_edges)
    dst_ids = np.random.randint(num_nodes, size=num_edges)
    edata = EdgeData(src_ids, dst_ids, {})
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == ('_V', '_E', '_V')
    assert len(edata.data) == 0
    assert np.array_equal(edata.graph_id, np.full(num_edges, 0))
    # EdgeData more
    src_ids = np.random.randint(num_nodes, size=num_edges).astype(np.float64)
    dst_ids = np.random.randint(num_nodes, size=num_edges).astype(np.float64)
    data = {'feat': np.random.rand(num_edges, 3)}
    etype = ('user', 'like', 'item')
    graph_ids = np.arange(num_edges)
    edata = EdgeData(src_ids, dst_ids, data,
                     type=etype, graph_id=graph_ids)
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == etype
    assert len(edata.data) == len(data)
    for k, v in data.items():
        assert k in edata.data
        assert np.array_equal(edata.data[k], v)
    assert np.array_equal(edata.graph_id, graph_ids)
    # EdgeData except
    expect_except = False
    try:
        EdgeData(np.arange(num_edges), np.arange(
            num_edges+1), {'feat': np.random.rand(num_edges-1, 3)}, graph_id=np.arange(num_edges+2))
    except:
        expect_except = True
    assert expect_except

    # GraphData basics
    num_graphs = 10
    graph_ids = np.arange(num_graphs)
    gdata = GraphData(graph_ids, {})
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == 0
    # GraphData more
    graph_ids = np.arange(num_graphs).astype(np.float64)
    data = {'feat': np.random.rand(num_graphs, 3)}
    gdata = GraphData(graph_ids, data)
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == len(data)
    for k, v in data.items():
        assert k in gdata.data
        assert np.array_equal(gdata.data[k], v)


@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_csvdataset():
    _test_NodeEdgeGraphData()
    _test_construct_graphs_homo()
    _test_construct_graphs_hetero()
    _test_construct_graphs_multiple()
    _test_DefaultDataParser()
    _test_load_yaml_with_sanity_check()
    _test_load_node_data_from_csv()
    _test_load_edge_data_from_csv()
    _test_load_graph_data_from_csv()
    _test_CSVDataset_single()
    _test_CSVDataset_multiple()
    _test_CSVDataset_customized_data_parser()

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_add_nodepred_split():
    dataset = data.AmazonCoBuyComputerDataset()
    print('train_mask' in dataset[0].ndata)
    data.utils.add_nodepred_split(dataset, [0.8, 0.1, 0.1])
    assert 'train_mask' in dataset[0].ndata

    dataset = data.AIFBDataset()
    print('train_mask' in dataset[0].nodes['Publikationen'].data)
    data.utils.add_nodepred_split(dataset, [0.8, 0.1, 0.1], ntype='Publikationen')
    assert 'train_mask' in dataset[0].nodes['Publikationen'].data

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_as_nodepred1():
    ds = data.AmazonCoBuyComputerDataset()
    print('train_mask' in ds[0].ndata)
    new_ds = data.AsNodePredDataset(ds, [0.8, 0.1, 0.1], verbose=True)
    assert len(new_ds) == 1
    assert new_ds[0].num_nodes() == ds[0].num_nodes()
    assert new_ds[0].num_edges() == ds[0].num_edges()
    assert 'train_mask' in new_ds[0].ndata

    ds = data.AIFBDataset()
    print('train_mask' in ds[0].nodes['Personen'].data)
    new_ds = data.AsNodePredDataset(ds, [0.8, 0.1, 0.1], 'Personen', verbose=True)
    assert len(new_ds) == 1
    assert new_ds[0].ntypes == ds[0].ntypes
    assert new_ds[0].canonical_etypes == ds[0].canonical_etypes
    assert 'train_mask' in new_ds[0].nodes['Personen'].data

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_as_nodepred2():
    # test proper reprocessing

    # create
    ds = data.AsNodePredDataset(data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1])
    assert F.sum(F.astype(ds[0].ndata['train_mask'], F.int32), 0) == int(ds[0].num_nodes() * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1])
    assert F.sum(F.astype(ds[0].ndata['train_mask'], F.int32), 0) == int(ds[0].num_nodes() * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(data.AmazonCoBuyComputerDataset(), [0.1, 0.1, 0.8])
    assert F.sum(F.astype(ds[0].ndata['train_mask'], F.int32), 0) == int(ds[0].num_nodes() * 0.1)

    # create
    ds = data.AsNodePredDataset(data.AIFBDataset(), [0.8, 0.1, 0.1], 'Personen', verbose=True)
    assert F.sum(F.astype(ds[0].nodes['Personen'].data['train_mask'], F.int32), 0) == int(ds[0].num_nodes('Personen') * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(data.AIFBDataset(), [0.8, 0.1, 0.1], 'Personen', verbose=True)
    assert F.sum(F.astype(ds[0].nodes['Personen'].data['train_mask'], F.int32), 0) == int(ds[0].num_nodes('Personen') * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(data.AIFBDataset(), [0.1, 0.1, 0.8], 'Personen', verbose=True)
    assert F.sum(F.astype(ds[0].nodes['Personen'].data['train_mask'], F.int32), 0) == int(ds[0].num_nodes('Personen') * 0.1)

@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason="ogb only supports pytorch")
def test_as_nodepred_ogb():
    from ogb.nodeproppred import DglNodePropPredDataset
    ds = data.AsNodePredDataset(DglNodePropPredDataset("ogbn-arxiv"), split_ratio=None, verbose=True)
    # force generate new split
    ds = data.AsNodePredDataset(DglNodePropPredDataset("ogbn-arxiv"), split_ratio=[0.7, 0.2, 0.1], verbose=True)

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_as_linkpred():
    # create
    ds = data.AsLinkPredDataset(data.CoraGraphDataset(), split_ratio=[0.8, 0.1, 0.1], neg_ratio=1, verbose=True)
    # Cora has 10556 edges, so a 10% test split yields 1057 test edges
    assert ds.test_edges[0][0].shape[0] == 1057
    # the number of negative samples is not guaranteed, so assert a relaxed range
    assert 1000 <= ds.test_edges[1][0].shape[0] <= 1057
    # read from cache
    ds = data.AsLinkPredDataset(data.CoraGraphDataset(), split_ratio=[0.7, 0.1, 0.2], neg_ratio=2, verbose=True)
    assert ds.test_edges[0][0].shape[0] == 2112
    # negative samples are not guaranteed to reach ratio 2, so assert a relaxed range
    assert 4000 < ds.test_edges[1][0].shape[0] <= 4224


@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason="ogb only supports pytorch")
def test_as_linkpred_ogb():
    from ogb.linkproppred import DglLinkPropPredDataset
    ds = data.AsLinkPredDataset(DglLinkPropPredDataset("ogbl-collab"), split_ratio=None, verbose=True)
    # original dataset has 46329 test edges
    assert ds.test_edges[0][0].shape[0] == 46329
    # force generate new split
    ds = data.AsLinkPredDataset(DglLinkPropPredDataset("ogbl-collab"), split_ratio=[0.7, 0.2, 0.1], verbose=True)
    assert ds.test_edges[0][0].shape[0] == 235812

@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_as_nodepred_csvdataset():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path = os.path.join(test_dir, "test_edges.csv")
        nodes_csv_path = os.path.join(test_dir, "test_nodes.csv")
        meta_yaml_data = {'version': '1.0.0', 'dataset_name': 'default_name',
                          'node_data': [{'file_name': os.path.basename(nodes_csv_path)
                                         }],
                          'edge_data': [{'file_name': os.path.basename(edges_csv_path)
                                         }],
                          }
        with open(meta_yaml_path, 'w') as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_dims = 3
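        # Give every node a distinct label so the inferred num_classes equals num_nodes.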
        num_classes = num_nodes
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.arange(num_classes)
        df = pd.DataFrame({'node_id': np.arange(num_nodes),
                           'label': label_ndata,
                           'feat': [line.tolist() for line in feat_ndata],
                           })
        df.to_csv(nodes_csv_path, index=False)
        df = pd.DataFrame({'src_id': np.random.randint(num_nodes, size=num_edges),
                           'dst_id': np.random.randint(num_nodes, size=num_edges),
                           })
        df.to_csv(edges_csv_path, index=False)

        ds = data.CSVDataset(test_dir, force_reload=True)
        assert 'feat' in ds[0].ndata
        assert 'label' in ds[0].ndata
        assert 'train_mask' not in ds[0].ndata
        assert not hasattr(ds[0], 'num_classes')
        new_ds = data.AsNodePredDataset(ds, split_ratio=[0.8, 0.1, 0.1], force_reload=True)
        assert new_ds.num_classes == num_classes
        assert 'feat' in new_ds[0].ndata
        assert 'label' in new_ds[0].ndata
        assert 'train_mask' in new_ds[0].ndata

if __name__ == '__main__':
    test_minigc()
    test_gin()
    test_data_hash()
    test_tudataset_regression()
    test_fraud()
    test_fakenews()
    test_extract_archive()
    test_csvdataset()
    test_add_nodepred_split()
    test_as_nodepred1()
    test_as_nodepred2()
    test_as_nodepred_csvdataset()