import gzip
import io
import os
import tarfile
import tempfile
import unittest
import warnings

import backend as F

import dgl
import dgl.data as data
import numpy as np
import pandas as pd
import pytest
import yaml
from dgl import DGLError


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_minigc():
    ds = data.MiniGCDataset(16, 10, 20)
    g, l = list(zip(*ds))
    print(g, l)
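    # AddSelfLoop with allow_duplicate=True adds one self-loop per node,
    # so the transformed graph gains exactly num_nodes() edges.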
    g1 = ds[0][0]
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.MiniGCDataset(16, 10, 20, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g1.num_edges() == g1.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_gin():
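    # Expected number of graphs for each GIN dataset.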
    ds_n_graphs = {
        "MUTAG": 188,
        "IMDBBINARY": 1000,
        "IMDBMULTI": 1500,
        "PROTEINS": 1113,
        "PTC": 344,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for name, n_graphs in ds_n_graphs.items():
        ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False)
        assert len(ds) == n_graphs, (len(ds), name)
        g1 = ds[0][0]
        ds = data.GINDataset(
            name, self_loop=False, degree_as_nlabel=False, transform=transform
        )
        g2 = ds[0][0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == ds.gclasses


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_fraud():
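    # The fraud graphs are heterogeneous with 3 edge types, so the self-loop
    # transform adds num_nodes() edges per relation.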
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    g = data.FraudDataset("amazon")[0]
    assert g.num_nodes() == 11944
    num_edges1 = g.num_edges()
    g2 = data.FraudDataset("amazon", transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - num_edges1 == g.num_nodes() * 3

    g = data.FraudAmazonDataset()[0]
    assert g.num_nodes() == 11944
    g2 = data.FraudAmazonDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3

    g = data.FraudYelpDataset()[0]
    assert g.num_nodes() == 45954
    g2 = data.FraudYelpDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_tudataset_regression():
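    # The ZINC_test split of TUDataset is a graph regression dataset with
    # 5000 graphs.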
    ds = data.TUDataset("ZINC_test", force_reload=True)
    assert ds.num_classes == ds.num_labels
    assert len(ds) == 5000
    g = ds[0][0]

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.TUDataset("ZINC_test", force_reload=True, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_data_hash():
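    # Datasets built with equal hash_key tuples must share the same hash;
    # a different key must produce a different hash.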
    class HashTestDataset(data.DGLDataset):
        def __init__(self, hash_key=()):
            super(HashTestDataset, self).__init__("hashtest", hash_key=hash_key)

        def _load(self):
            pass

    a = HashTestDataset((True, 0, "1", (1, 2, 3)))
    b = HashTestDataset((True, 0, "1", (1, 2, 3)))
    c = HashTestDataset((True, 0, "1", (1, 2, 4)))
    assert a.hash == b.hash
    assert a.hash != c.hash


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_citation_graph():
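    # With reorder=True, edges are sorted by destination node ID, which the
    # np.sort checks below verify.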
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    # cora
    g = data.CoraGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 2708
    assert g.num_edges() == 10556
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Citeseer
    g = data.CiteseerGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 3327
    assert g.num_edges() == 9228
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CiteseerGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Pubmed
    g = data.PubmedGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 19717
    assert g.num_edges() == 88651
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.PubmedGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_gnn_benchmark():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    # AmazonCoBuyComputerDataset
    g = data.AmazonCoBuyComputerDataset()[0]
    assert g.num_nodes() == 13752
    assert g.num_edges() == 491722
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyComputerDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # AmazonCoBuyPhotoDataset
    g = data.AmazonCoBuyPhotoDataset()[0]
    assert g.num_nodes() == 7650
    assert g.num_edges() == 238163
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyPhotoDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorPhysicsDataset
    g = data.CoauthorPhysicsDataset()[0]
    assert g.num_nodes() == 34493
    assert g.num_edges() == 495924
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorPhysicsDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorCSDataset
    g = data.CoauthorCSDataset()[0]
    assert g.num_nodes() == 18333
    assert g.num_edges() == 163788
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorCSDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoraFullDataset
    g = data.CoraFullDataset()[0]
    assert g.num_nodes() == 19793
    assert g.num_edges() == 126842
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraFullDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_explain_syn():
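    # Synthetic datasets for GNN explainability; reloading with the same seed
    # must reproduce identical edges.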
    dataset = data.BAShapeDataset()
    assert dataset.num_classes == 4
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.BAShapeDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.BAShapeDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.BACommunityDataset()
    assert dataset.num_classes == 8
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.BACommunityDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.BACommunityDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.TreeCycleDataset()
    assert dataset.num_classes == 2
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.TreeCycleDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.TreeCycleDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.TreeGridDataset()
    assert dataset.num_classes == 2
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.TreeGridDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.TreeGridDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.BA2MotifDataset()
    assert dataset.num_classes == 2
    g, label = dataset[0]
    assert "feat" in g.ndata


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_wiki_cs():
    g = data.WikiCSDataset()[0]
    assert g.num_nodes() == 11701
    assert g.num_edges() == 431726
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.WikiCSDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skip(reason="Dataset too large to download for the latest CI.")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_yelp():
    g = data.YelpDataset(reorder=True)[0]
    assert g.num_nodes() == 716847
    assert g.num_edges() == 13954819
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.YelpDataset(reorder=True, transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_flickr():
    g = data.FlickrDataset(reorder=True)[0]
    assert g.num_nodes() == 89250
    assert g.num_edges() == 899756
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.FlickrDataset(reorder=True, transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_pattern():
    mode_n_graphs = {
        "train": 10000,
        "valid": 2000,
        "test": 2000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        ds = data.PATTERNDataset(mode=mode)
        assert len(ds) == n_graphs, (len(ds), mode)
        g1 = ds[0]
        ds = data.PATTERNDataset(mode=mode, transform=transform)
        g2 = ds[0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == 2


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_cluster():
    mode_n_graphs = {
        "train": 10000,
        "valid": 1000,
        "test": 1000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        ds = data.CLUSTERDataset(mode=mode)
        assert len(ds) == n_graphs, (len(ds), mode)
        g1 = ds[0]
        ds = data.CLUSTERDataset(mode=mode, transform=transform)
        g2 = ds[0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == 6


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="only supports pytorch"
)
def test_zinc():
    mode_n_graphs = {
        "train": 10000,
        "valid": 1000,
        "test": 1000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        dataset1 = data.ZINCDataset(mode=mode)
        g1, label = dataset1[0]
        dataset2 = data.ZINCDataset(mode=mode, transform=transform)
        g2, _ = dataset2[0]

        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        # return a scalar tensor
        assert not label.shape


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_extract_archive():
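    # Round-trip small gzip and tar archives through data.utils.extract_archive.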
    # gzip
    with tempfile.TemporaryDirectory() as src_dir:
        gz_file = "gz_archive"
        gz_path = os.path.join(src_dir, gz_file + ".gz")
        content = b"test extract archive gzip"
        with gzip.open(gz_path, "wb") as f:
            f.write(content)
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(gz_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, gz_file))

    # tar
    with tempfile.TemporaryDirectory() as src_dir:
        tar_file = "tar_archive"
        tar_path = os.path.join(src_dir, tar_file + ".tar")
        # default encode to utf8
        content = "test extract archive tar\n".encode()
        info = tarfile.TarInfo(name="tar_archive")
        info.size = len(content)
        with tarfile.open(tar_path, "w") as f:
            f.addfile(info, io.BytesIO(content))
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(tar_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, tar_file))


def _test_construct_graphs_node_ids():
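    # Exercise DGLGraphConstructor with duplicated, pre-labeled, and mixed
    # numeric/non-numeric node IDs.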
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    num_nodes = 100
    num_edges = 1000

    # node IDs are required to be unique; duplicates should raise
    node_ids = np.random.choice(np.arange(num_nodes / 2), num_nodes)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    node_data = NodeData(node_ids, {})
    edge_data = EdgeData(src_ids, dst_ids, {})
    expect_except = False
    try:
        _, _ = DGLGraphConstructor.construct_graphs(node_data, edge_data)
    except:
        expect_except = True
    assert expect_except

    # node IDs are already labeled from 0 to num_nodes-1
    node_ids = np.arange(num_nodes)
    np.random.shuffle(node_ids)
    _, idx = np.unique(node_ids, return_index=True)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    node_feat = np.random.rand(num_nodes, 3)
    node_data = NodeData(node_ids, {"feat": node_feat})
    edge_data = EdgeData(src_ids, dst_ids, {})
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == len(node_ids)
    assert g.num_edges() == len(src_ids)
    assert F.array_equal(
        F.tensor(node_feat[idx], dtype=F.float32), g.ndata["feat"]
    )

    # node IDs are mixed with numeric and non-numeric values
    # homogeneous graph
    node_ids = [1, 2, 3, "a"]
    src_ids = [1, 2, 3]
    dst_ids = ["a", 1, 2]
    node_data = NodeData(node_ids, {})
    edge_data = EdgeData(src_ids, dst_ids, {})
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == len(node_ids)
    assert g.num_edges() == len(src_ids)

    # heterogeneous graph
    node_ids_user = [1, 2, 3]
    node_ids_item = ["a", "b", "c"]
    src_ids = node_ids_user
    dst_ids = node_ids_item
    node_data_user = NodeData(node_ids_user, {}, type="user")
    node_data_item = NodeData(node_ids_item, {}, type="item")
    edge_data = EdgeData(src_ids, dst_ids, {}, type=("user", "like", "item"))
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        [node_data_user, node_data_item], edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert not g.is_homogeneous
    assert g.num_nodes("user") == len(node_ids_user)
    assert g.num_nodes("item") == len(node_ids_item)
    assert g.num_edges() == len(src_ids)


def _test_construct_graphs_homo():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    # node_id may be unsorted and non-numeric
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    node_ids = np.random.choice(
        np.arange(num_nodes * 2), size=num_nodes, replace=False
    )
    assert len(node_ids) == num_nodes
    # shuffle so node IDs are not sorted
    np.random.shuffle(node_ids)
    # prefix with a string so node IDs are non-numeric
    node_ids = ["id_{}".format(id) for id in node_ids]
    t_ndata = {
        "feat": np.random.rand(num_nodes, num_dims),
        "label": np.random.randint(2, size=num_nodes),
    }
    _, u_indices = np.unique(node_ids, return_index=True)
    ndata = {
        "feat": t_ndata["feat"][u_indices],
        "label": t_ndata["label"][u_indices],
    }
    node_data = NodeData(node_ids, t_ndata)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    edata = {
        "feat": np.random.rand(num_edges, num_dims),
        "label": np.random.randint(2, size=num_edges),
    }
    edge_data = EdgeData(src_ids, dst_ids, edata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == num_nodes
    assert g.num_edges() == num_edges

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.dtype(rhs[key]) != F.float64
            assert F.array_equal(
                F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
            )

    assert_data(ndata, g.ndata)
    assert_data(edata, g.edata)


def _test_construct_graphs_hetero():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    # node_id/src_id/dst_id may be unsorted, duplicated, or non-numeric
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    ntypes = ["user", "item"]
    node_data = []
    node_ids_dict = {}
    ndata_dict = {}
    for ntype in ntypes:
        node_ids = np.random.choice(
            np.arange(num_nodes * 2), size=num_nodes, replace=False
        )
        assert len(node_ids) == num_nodes
        # shuffle so node IDs are not sorted
        np.random.shuffle(node_ids)
        # prefix with a string so node IDs are non-numeric
        node_ids = ["id_{}".format(id) for id in node_ids]
        t_ndata = {
            "feat": np.random.rand(num_nodes, num_dims),
            "label": np.random.randint(2, size=num_nodes),
        }
        _, u_indices = np.unique(node_ids, return_index=True)
        ndata = {
            "feat": t_ndata["feat"][u_indices],
            "label": t_ndata["label"][u_indices],
        }
        node_data.append(NodeData(node_ids, t_ndata, type=ntype))
        node_ids_dict[ntype] = node_ids
        ndata_dict[ntype] = ndata
    etypes = [("user", "follow", "user"), ("user", "like", "item")]
    edge_data = []
    edata_dict = {}
    for src_type, e_type, dst_type in etypes:
        src_ids = np.random.choice(node_ids_dict[src_type], size=num_edges)
        dst_ids = np.random.choice(node_ids_dict[dst_type], size=num_edges)
        edata = {
            "feat": np.random.rand(num_edges, num_dims),
            "label": np.random.randint(2, size=num_edges),
        }
        edge_data.append(
            EdgeData(src_ids, dst_ids, edata, type=(src_type, e_type, dst_type))
        )
        edata_dict[(src_type, e_type, dst_type)] = edata
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert not g.is_homogeneous
    assert g.num_nodes() == num_nodes * len(ntypes)
    assert g.num_edges() == num_edges * len(etypes)

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.dtype(rhs[key]) != F.float64
            assert F.array_equal(
                F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
            )

    for ntype in g.ntypes:
        assert g.num_nodes(ntype) == num_nodes
        assert_data(ndata_dict[ntype], g.nodes[ntype].data)
    for etype in g.canonical_etypes:
        assert g.num_edges(etype) == num_edges
        assert_data(edata_dict[etype], g.edges[etype].data)


def _test_construct_graphs_multiple():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        GraphData,
        NodeData,
    )

    num_nodes = 100
    num_edges = 1000
    num_graphs = 10
    num_dims = 3
    node_ids = np.array([], dtype=int)
    src_ids = np.array([], dtype=int)
    dst_ids = np.array([], dtype=int)
    ngraph_ids = np.array([], dtype=int)
    egraph_ids = np.array([], dtype=int)
    u_indices = np.array([], dtype=int)
    for i in range(num_graphs):
        l_node_ids = np.random.choice(
            np.arange(num_nodes * 2), size=num_nodes, replace=False
        )
        node_ids = np.append(node_ids, l_node_ids)
        _, l_u_indices = np.unique(l_node_ids, return_index=True)
        u_indices = np.append(u_indices, l_u_indices)
        ngraph_ids = np.append(ngraph_ids, np.full(num_nodes, i))
        src_ids = np.append(
            src_ids, np.random.choice(l_node_ids, size=num_edges)
        )
        dst_ids = np.append(
            dst_ids, np.random.choice(l_node_ids, size=num_edges)
        )
        egraph_ids = np.append(egraph_ids, np.full(num_edges, i))
    ndata = {
        "feat": np.random.rand(num_nodes * num_graphs, num_dims),
        "label": np.random.randint(2, size=num_nodes * num_graphs),
    }
    ngraph_ids = ["graph_{}".format(id) for id in ngraph_ids]
    node_data = NodeData(node_ids, ndata, graph_id=ngraph_ids)
    egraph_ids = ["graph_{}".format(id) for id in egraph_ids]
    edata = {
        "feat": np.random.rand(num_edges * num_graphs, num_dims),
        "label": np.random.randint(2, size=num_edges * num_graphs),
    }
    edge_data = EdgeData(src_ids, dst_ids, edata, graph_id=egraph_ids)
    gdata = {
        "feat": np.random.rand(num_graphs, num_dims),
        "label": np.random.randint(2, size=num_graphs),
    }
    graph_ids = ["graph_{}".format(id) for id in np.arange(num_graphs)]
    graph_data = GraphData(graph_ids, gdata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data, graph_data
    )
    assert len(graphs) == num_graphs
    assert len(data_dict) == len(gdata)
    for k, v in data_dict.items():
        assert F.dtype(v) != F.float64
        assert F.array_equal(
            F.reshape(F.tensor(gdata[k], dtype=F.dtype(v)), (len(graphs), -1)),
            v,
        )
    for i, g in enumerate(graphs):
        assert g.is_homogeneous
        assert g.num_nodes() == num_nodes
        assert g.num_edges() == num_edges

        def assert_data(lhs, rhs, size, node=False):
            for key, value in lhs.items():
                assert key in rhs
                value = value[i * size : (i + 1) * size]
                if node:
                    indices = u_indices[i * size : (i + 1) * size]
                    value = value[indices]
                assert F.dtype(rhs[key]) != F.float64
                assert F.array_equal(
                    F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
                )

        assert_data(ndata, g.ndata, num_nodes, node=True)
        assert_data(edata, g.edata, num_edges)

    # Graph IDs present in node/edge CSV but missing from graph CSV should raise
    graph_data = GraphData(np.arange(num_graphs - 2), {})
    expect_except = False
    try:
        _, _ = DGLGraphConstructor.construct_graphs(
            node_data, edge_data, graph_data
        )
    except:
        expect_except = True
    assert expect_except


def _get_data_table(data_frame):
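    # Helper: write a DataFrame to CSV and parse it back with DefaultDataParser.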
    from dgl.data.csv_dataset_base import DefaultDataParser

    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")

        data_frame.to_csv(csv_path, index=False)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)

    # Intercepting the warning: "Unamed column is found. Ignored...".
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        return dp(df)


def _test_DefaultDataParser():
    # common csv
    num_nodes = 5
    num_labels = 3
    num_dims = 2
    node_id = np.arange(num_nodes)
    label = np.random.randint(num_labels, size=num_nodes)
    feat = np.random.rand(num_nodes, num_dims)
    df = pd.DataFrame(
        {
            "node_id": node_id,
            "label": label,
            "feat": [line.tolist() for line in feat],
        }
    )

    dt = _get_data_table(df)
    assert np.array_equal(node_id, dt["node_id"])
    assert np.array_equal(label, dt["label"])
    assert np.array_equal(feat, dt["feat"])

    # a string column with non-numeric values should raise
    df = pd.DataFrame({"label": ["a", "b", "c"]})
    expect_except = False
    try:
        _get_data_table(df)
    except:
        expect_except = True
    assert expect_except

    # csv has an index column, which is ignored as it's unnamed
    df = pd.DataFrame({"label": [1, 2, 3]})
    dt = _get_data_table(df)
    assert len(dt) == 1


def _test_load_yaml_with_sanity_check():
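    # Exercise load_yaml_with_sanity_check on minimal, fully specified, and
    # malformed meta.yaml files.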
    from dgl.data.csv_dataset_base import load_yaml_with_sanity_check

    with tempfile.TemporaryDirectory() as test_dir:
        yaml_path = os.path.join(test_dir, "meta.yaml")
        # workable but usually meaningless
        yaml_data = {
            "dataset_name": "default",
            "node_data": [],
            "edge_data": [],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert meta.version == "1.0.0"
        assert meta.dataset_name == "default"
        assert meta.separator == ","
        assert len(meta.node_data) == 0
        assert len(meta.edge_data) == 0
        assert meta.graph_data is None
        # minimum with required fields only
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes.csv"}],
            "edge_data": [{"file_name": "edges.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        for ndata in meta.node_data:
            assert ndata.file_name == "nodes.csv"
            assert ndata.ntype == "_V"
            assert ndata.graph_id_field == "graph_id"
            assert ndata.node_id_field == "node_id"
        for edata in meta.edge_data:
            assert edata.file_name == "edges.csv"
            assert edata.etype == ["_V", "_E", "_V"]
            assert edata.graph_id_field == "graph_id"
            assert edata.src_id_field == "src_id"
            assert edata.dst_id_field == "dst_id"
        # optional fields are specified
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "separator": "|",
            "node_data": [
                {
                    "file_name": "nodes.csv",
                    "ntype": "user",
                    "graph_id_field": "xxx",
                    "node_id_field": "xxx",
                }
            ],
            "edge_data": [
                {
                    "file_name": "edges.csv",
                    "etype": ["user", "follow", "user"],
                    "graph_id_field": "xxx",
                    "src_id_field": "xxx",
                    "dst_id_field": "xxx",
                }
            ],
            "graph_data": {"file_name": "graph.csv", "graph_id_field": "xxx"},
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert len(meta.node_data) == 1
        ndata = meta.node_data[0]
        assert ndata.ntype == "user"
        assert ndata.graph_id_field == "xxx"
        assert ndata.node_id_field == "xxx"
        assert len(meta.edge_data) == 1
        edata = meta.edge_data[0]
        assert edata.etype == ["user", "follow", "user"]
        assert edata.graph_id_field == "xxx"
        assert edata.src_id_field == "xxx"
        assert edata.dst_id_field == "xxx"
        assert meta.graph_data is not None
        assert meta.graph_data.file_name == "graph.csv"
        assert meta.graph_data.graph_id_field == "xxx"
        # some required fields are missing
        yaml_data = {
            "dataset_name": "default",
            "node_data": [],
            "edge_data": [],
        }
        for field in yaml_data.keys():
            ydata = {k: v for k, v in yaml_data.items()}
            ydata.pop(field)
            with open(yaml_path, "w") as f:
                yaml.dump(ydata, f, sort_keys=False)
            expect_except = False
            try:
                meta = load_yaml_with_sanity_check(yaml_path)
            except:
                expect_except = True
            assert expect_except
        # inapplicable version
        yaml_data = {
            "version": "0.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes_0.csv"}],
            "edge_data": [{"file_name": "edges_0.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except
        # duplicate node types
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [
                {"file_name": "nodes.csv"},
                {"file_name": "nodes.csv"},
            ],
            "edge_data": [{"file_name": "edges.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except
        # duplicate edge types
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes.csv"}],
            "edge_data": [
                {"file_name": "edges.csv"},
                {"file_name": "edges.csv"},
            ],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_node_data_from_csv():
    from dgl.data.csv_dataset_base import DefaultDataParser, MetaNode, NodeData

    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        # minimum
        df = pd.DataFrame({"node_id": np.arange(num_nodes)})
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": np.random.randint(3, size=num_nodes),
            }
        )
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df["label"], node_data.data["label"])
        assert np.array_equal(np.full(num_nodes, 0), node_data.graph_id)
        assert node_data.type == "_V"

        # add more fields into nodes.csv
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": np.random.randint(3, size=num_nodes),
                "graph_id": np.full(num_nodes, 1),
            }
        )
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df["label"], node_data.data["label"])
        assert np.array_equal(df["graph_id"], node_data.graph_id)
        assert node_data.type == "_V"

        # required header is missing
        df = pd.DataFrame({"label": np.random.randint(3, size=num_nodes)})
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        expect_except = False
        try:
            NodeData.load_from_csv(meta_node, DefaultDataParser())
        except:
            expect_except = True
        assert expect_except


def _test_load_edge_data_from_csv():
    from dgl.data.csv_dataset_base import DefaultDataParser, EdgeData, MetaEdge

    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        num_edges = 1000
        # minimum
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "label": np.random.randint(3, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 1
        assert np.array_equal(df["label"], edge_data.data["label"])
        assert np.array_equal(np.full(num_edges, 0), edge_data.graph_id)
        assert edge_data.type == ("_V", "_E", "_V")

        # add more fields into edges.csv
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "graph_id": np.arange(num_edges),
                "feat": np.random.randint(3, size=num_edges),
                "label": np.random.randint(3, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 2
        assert np.array_equal(df["feat"], edge_data.data["feat"])
        assert np.array_equal(df["label"], edge_data.data["label"])
        assert np.array_equal(df["graph_id"], edge_data.graph_id)
        assert edge_data.type == ("_V", "_E", "_V")

        # required headers are missing
        df = pd.DataFrame(
            {"src_id": np.random.randint(num_nodes, size=num_edges)}
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
1076
        expect_except = False
        try:
            EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except
        df = pd.DataFrame(
            {"dst_id": np.random.randint(num_nodes, size=num_edges)}
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        expect_except = False
        try:
            EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_graph_data_from_csv():
    from dgl.data.csv_dataset_base import (
        DefaultDataParser,
        GraphData,
        MetaGraph,
    )

    with tempfile.TemporaryDirectory() as test_dir:
        num_graphs = 100
        # minimum
        df = pd.DataFrame({"graph_id": np.arange(num_graphs)})
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "graph_id": np.arange(num_graphs),
                "label": np.random.randint(3, size=num_graphs),
            }
        )
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 1
        assert np.array_equal(df["label"], graph_data.data["label"])

        # add more fields into graph.csv
        df = pd.DataFrame(
            {
                "graph_id": np.arange(num_graphs),
                "feat": np.random.randint(3, size=num_graphs),
                "label": np.random.randint(3, size=num_graphs),
            }
        )
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 2
        assert np.array_equal(df["feat"], graph_data.data["feat"])
        assert np.array_equal(df["label"], graph_data.data["label"])

        # required header is missing
        df = pd.DataFrame({"label": np.random.randint(3, size=num_graphs)})
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        expect_except = False
        try:
            GraphData.load_from_csv(meta_graph, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_CSVDataset_single():
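    # End-to-end test: generate meta.yaml and node/edge CSVs on disk, then
    # load them as a single heterogeneous graph, with and without cache.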
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_dims = 3
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes)
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
            }
        )
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        feat_edata = np.random.rand(num_edges, num_dims)
        label_edata = np.random.randint(2, size=num_edges)
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "label": label_edata,
                "feat": [line.tolist() for line in feat_edata],
            }
        )
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)

        # load CSVDataset
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
            csv_dataset = data.CSVDataset(test_dir, force_reload=force_reload)
            assert len(csv_dataset) == 1
            g = csv_dataset[0]
            assert not g.is_homogeneous
            assert csv_dataset.has_cache()
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                assert F.array_equal(
                    F.tensor(feat_ndata, dtype=F.float32),
                    g.nodes[ntype].data["feat"],
                )
                assert np.array_equal(
                    label_ndata, F.asnumpy(g.nodes[ntype].data["label"])
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                assert F.array_equal(
                    F.tensor(feat_edata, dtype=F.float32),
                    g.edges[etype].data["feat"],
                )
                assert np.array_equal(
                    label_edata, F.asnumpy(g.edges[etype].data["label"])
                )


def _test_CSVDataset_multiple():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
            "graph_data": {"file_name": os.path.basename(graph_csv_path)},
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        num_dims = 3
        feat_ndata = np.random.rand(num_nodes * num_graphs, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes * num_graphs)
        df = pd.DataFrame(
            {
                "node_id": np.hstack(
                    [np.arange(num_nodes) for _ in range(num_graphs)]
                ),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
                "graph_id": np.hstack(
                    [np.full(num_nodes, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        feat_edata = np.random.rand(num_edges * num_graphs, num_dims)
        label_edata = np.random.randint(2, size=num_edges * num_graphs)
        df = pd.DataFrame(
            {
                "src_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "dst_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "label": label_edata,
                "feat": [line.tolist() for line in feat_edata],
                "graph_id": np.hstack(
                    [np.full(num_edges, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        feat_gdata = np.random.rand(num_graphs, num_dims)
        label_gdata = np.random.randint(2, size=num_graphs)
        df = pd.DataFrame(
            {
                "label": label_gdata,
                "feat": [line.tolist() for line in feat_gdata],
                "graph_id": np.arange(num_graphs),
            }
        )
        df.to_csv(graph_csv_path, index=False)

        # load CSVDataset with default node/edge/gdata_parser
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
            csv_dataset = data.CSVDataset(test_dir, force_reload=force_reload)
            assert len(csv_dataset) == num_graphs
            assert csv_dataset.has_cache()
            assert len(csv_dataset.data) == 2
            assert "feat" in csv_dataset.data
            assert "label" in csv_dataset.data
            assert F.array_equal(
                F.tensor(feat_gdata, dtype=F.float32), csv_dataset.data["feat"]
            )
            for i, (g, g_data) in enumerate(csv_dataset):
                assert not g.is_homogeneous
                assert F.asnumpy(g_data["label"]) == label_gdata[i]
                assert F.array_equal(
                    g_data["feat"], F.tensor(feat_gdata[i], dtype=F.float32)
                )
                for ntype in g.ntypes:
                    assert g.num_nodes(ntype) == num_nodes
                    assert F.array_equal(
                        F.tensor(
                            feat_ndata[i * num_nodes : (i + 1) * num_nodes],
                            dtype=F.float32,
                        ),
                        g.nodes[ntype].data["feat"],
                    )
                    assert np.array_equal(
                        label_ndata[i * num_nodes : (i + 1) * num_nodes],
                        F.asnumpy(g.nodes[ntype].data["label"]),
                    )
                for etype in g.etypes:
                    assert g.num_edges(etype) == num_edges
                    assert F.array_equal(
                        F.tensor(
                            feat_edata[i * num_edges : (i + 1) * num_edges],
                            dtype=F.float32,
                        ),
                        g.edges[etype].data["feat"],
                    )
                    assert np.array_equal(
                        label_edata[i * num_edges : (i + 1) * num_edges],
                        F.asnumpy(g.edges[etype].data["label"]),
                    )


def _test_CSVDataset_customized_data_parser():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
        meta_yaml_data = {
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
            "graph_data": {"file_name": os.path.basename(graph_csv_path)},
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        label_ndata = np.random.randint(2, size=num_nodes * num_graphs)
        df = pd.DataFrame(
            {
                "node_id": np.hstack(
                    [np.arange(num_nodes) for _ in range(num_graphs)]
                ),
                "label": label_ndata,
                "graph_id": np.hstack(
                    [np.full(num_nodes, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        label_edata = np.random.randint(2, size=num_edges * num_graphs)
        df = pd.DataFrame(
            {
                "src_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "dst_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "label": label_edata,
                "graph_id": np.hstack(
                    [np.full(num_edges, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        label_gdata = np.random.randint(2, size=num_graphs)
        df = pd.DataFrame(
            {"label": label_gdata, "graph_id": np.arange(num_graphs)}
        )
        df.to_csv(graph_csv_path, index=False)

        class CustDataParser:
            def __call__(self, df):
                data = {}
                for header in df:
                    dt = df[header].to_numpy().squeeze()
                    if header == "label":
                        dt += 2
                    data[header] = dt
                return data

        # load CSVDataset with customized node/edge/gdata_parser
        # specify via dict[ntype/etype, callable]
        csv_dataset = data.CSVDataset(
            test_dir,
            force_reload=True,
            ndata_parser={"user": CustDataParser()},
            edata_parser={("user", "like", "item"): CustDataParser()},
            gdata_parser=CustDataParser(),
        )
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert "label" in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2 if ntype == "user" else 0
                assert np.array_equal(
                    label_ndata[i * num_nodes : (i + 1) * num_nodes] + offset,
                    F.asnumpy(g.nodes[ntype].data["label"]),
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2 if etype == "like" else 0
                assert np.array_equal(
                    label_edata[i * num_edges : (i + 1) * num_edges] + offset,
                    F.asnumpy(g.edges[etype].data["label"]),
                )
        # specify via callable
        csv_dataset = data.CSVDataset(
            test_dir,
            force_reload=True,
            ndata_parser=CustDataParser(),
            edata_parser=CustDataParser(),
            gdata_parser=CustDataParser(),
        )
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert "label" in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2
                assert np.array_equal(
                    label_ndata[i * num_nodes : (i + 1) * num_nodes] + offset,
                    F.asnumpy(g.nodes[ntype].data["label"]),
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2
                assert np.array_equal(
                    label_edata[i * num_edges : (i + 1) * num_edges] + offset,
                    F.asnumpy(g.edges[etype].data["label"]),
                )
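

# CSVDataset only requires a data parser to be a callable mapping a
# DataFrame to a dict of arrays, as CustDataParser above shows. A slightly
# fuller sketch that also decodes list-valued columns which pandas
# serialized to strings (illustrative only; the real default parser may
# differ):
class _SketchDataParser:
    def __call__(self, df):
        import ast

        parsed = {}
        for header in df:
            col = df[header]
            if col.dtype == object:
                # "feat" columns written via tolist() round-trip as
                # strings such as "[0.1, 0.2]"
                parsed[header] = np.array(
                    [ast.literal_eval(v) for v in col]
                )
            else:
                parsed[header] = col.to_numpy().squeeze()
        return parsed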


def _test_NodeEdgeGraphData():
    from dgl.data.csv_dataset_base import EdgeData, GraphData, NodeData

    # NodeData basics
    num_nodes = 100
    node_ids = np.arange(num_nodes, dtype=float)
    ndata = NodeData(node_ids, {})
    assert np.array_equal(ndata.id, node_ids)
    assert len(ndata.data) == 0
    assert ndata.type == "_V"
    assert np.array_equal(ndata.graph_id, np.full(num_nodes, 0))
    # NodeData more
    data = {"feat": np.random.rand(num_nodes, 3)}
    graph_id = np.arange(num_nodes)
    ndata = NodeData(node_ids, data, type="user", graph_id=graph_id)
    assert ndata.type == "user"
    assert np.array_equal(ndata.graph_id, graph_id)
    assert len(ndata.data) == len(data)
    for k, v in data.items():
        assert k in ndata.data
        assert np.array_equal(ndata.data[k], v)
    # NodeData: inconsistent lengths should raise
    expect_except = False
    try:
        NodeData(
            np.arange(num_nodes),
            {"feat": np.random.rand(num_nodes + 1, 3)},
            graph_id=np.arange(num_nodes - 1),
        )
    except Exception:
        expect_except = True
    assert expect_except

    # EdgeData basics
    num_nodes = 100
    num_edges = 1000
    src_ids = np.random.randint(num_nodes, size=num_edges)
    dst_ids = np.random.randint(num_nodes, size=num_edges)
    edata = EdgeData(src_ids, dst_ids, {})
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == ("_V", "_E", "_V")
    assert len(edata.data) == 0
    assert np.array_equal(edata.graph_id, np.full(num_edges, 0))
    # EdgeData more
    src_ids = np.random.randint(num_nodes, size=num_edges).astype(float)
    dst_ids = np.random.randint(num_nodes, size=num_edges).astype(float)
    data = {"feat": np.random.rand(num_edges, 3)}
    etype = ("user", "like", "item")
    graph_ids = np.arange(num_edges)
    edata = EdgeData(src_ids, dst_ids, data, type=etype, graph_id=graph_ids)
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == etype
    assert len(edata.data) == len(data)
    for k, v in data.items():
        assert k in edata.data
        assert np.array_equal(edata.data[k], v)
    assert np.array_equal(edata.graph_id, graph_ids)
    # EdgeData: inconsistent lengths should raise
    expect_except = False
    try:
        EdgeData(
            np.arange(num_edges),
            np.arange(num_edges + 1),
            {"feat": np.random.rand(num_edges - 1, 3)},
            graph_id=np.arange(num_edges + 2),
        )
    except Exception:
        expect_except = True
    assert expect_except

    # GraphData basics
    num_graphs = 10
    graph_ids = np.arange(num_graphs)
    gdata = GraphData(graph_ids, {})
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == 0
    # GraphData more
    graph_ids = np.arange(num_graphs).astype(float)
    data = {"feat": np.random.rand(num_graphs, 3)}
    gdata = GraphData(graph_ids, data)
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == len(data)
    for k, v in data.items():
        assert k in gdata.data
        assert np.array_equal(gdata.data[k], v)
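

# The two expect_except blocks above pin down the shared invariant of
# NodeData/EdgeData: ids (or src/dst), every data column, and graph_id
# must all have the same length. A sketch of that check (an assumption
# about the validation these constructors perform):
def _sketch_check_lengths(ids, data, graph_id):
    n = len(ids)
    assert len(graph_id) == n, "graph_id length mismatch"
    for k, v in data.items():
        assert len(v) == n, "data column '{}' length mismatch".format(k)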


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
@unittest.skipIf(
    dgl.backend.backend_name == "tensorflow", reason="Skip Tensorflow"
)
def test_csvdataset():
    _test_NodeEdgeGraphData()
    _test_construct_graphs_node_ids()
    _test_construct_graphs_homo()
    _test_construct_graphs_hetero()
    _test_construct_graphs_multiple()
    _test_DefaultDataParser()
    _test_load_yaml_with_sanity_check()
    _test_load_node_data_from_csv()
    _test_load_edge_data_from_csv()
    _test_load_graph_data_from_csv()
    _test_CSVDataset_single()
    _test_CSVDataset_multiple()
    _test_CSVDataset_customized_data_parser()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_nodepred1():
    ds = data.AmazonCoBuyComputerDataset()
    print("train_mask" in ds[0].ndata)
    new_ds = data.AsNodePredDataset(ds, [0.8, 0.1, 0.1], verbose=True)
    assert len(new_ds) == 1
    assert new_ds[0].num_nodes() == ds[0].num_nodes()
    assert new_ds[0].num_edges() == ds[0].num_edges()
    assert "train_mask" in new_ds[0].ndata
    assert F.array_equal(
        new_ds.train_idx, F.nonzero_1d(new_ds[0].ndata["train_mask"])
    )
    assert F.array_equal(
        new_ds.val_idx, F.nonzero_1d(new_ds[0].ndata["val_mask"])
    )
    assert F.array_equal(
        new_ds.test_idx, F.nonzero_1d(new_ds[0].ndata["test_mask"])
    )

    ds = data.AIFBDataset()
    print("train_mask" in ds[0].nodes["Personen"].data)
    new_ds = data.AsNodePredDataset(
        ds, [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert len(new_ds) == 1
    assert new_ds[0].ntypes == ds[0].ntypes
    assert new_ds[0].canonical_etypes == ds[0].canonical_etypes
    assert "train_mask" in new_ds[0].nodes["Personen"].data
    assert F.array_equal(
        new_ds.train_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["train_mask"]),
    )
    assert F.array_equal(
        new_ds.val_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["val_mask"]),
    )
    assert F.array_equal(
        new_ds.test_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["test_mask"]),
    )
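

# train_idx/val_idx/test_idx are simply the nonzero positions of the
# corresponding masks, as the array_equal checks above verify. A sketch of
# how a training step would consume the split (model and loss_fn are
# illustrative stand-ins, not defined here):
def _sketch_nodepred_step(new_ds, model, loss_fn):
    g = new_ds[0]
    logits = model(g, g.ndata["feat"])
    idx = new_ds.train_idx
    return loss_fn(logits[idx], g.ndata["label"][idx])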


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_nodepred2():
    # test proper reprocessing

    # create
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.8
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.8
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.1, 0.1, 0.8]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.1
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.1)

    # create
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.8)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.8)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.1, 0.1, 0.8], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.1)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.1)
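

# The "invalid cache, re-read" cases work because the wrapper caches the
# generated masks on disk and keys that cache on its arguments: the same
# split_ratio hits the cache, a different one forces a re-split. A sketch
# of the keying idea (illustrative; not DGL's actual cache format):
def _sketch_split_cache_key(dataset_name, split_ratio, target_ntype=None):
    import hashlib

    raw = "{}-{}-{}".format(dataset_name, split_ratio, target_ntype)
    return hashlib.sha1(raw.encode()).hexdigest()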


@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="ogb only supports pytorch"
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_nodepred_ogb():
    from ogb.nodeproppred import DglNodePropPredDataset

    ds = data.AsNodePredDataset(
        DglNodePropPredDataset("ogbn-arxiv"), split_ratio=None, verbose=True
    )
    split = DglNodePropPredDataset("ogbn-arxiv").get_idx_split()
    train_idx, val_idx, test_idx = split["train"], split["valid"], split["test"]
    assert F.array_equal(ds.train_idx, F.tensor(train_idx))
    assert F.array_equal(ds.val_idx, F.tensor(val_idx))
    assert F.array_equal(ds.test_idx, F.tensor(test_idx))
    # force generating a new split
    ds = data.AsNodePredDataset(
        DglNodePropPredDataset("ogbn-arxiv"),
        split_ratio=[0.7, 0.2, 0.1],
        verbose=True,
    )
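

# split_ratio=None adopts the split shipped with the OGB dataset, which is
# what the array_equal checks above pin down; an explicit ratio overrides
# it. A sketch of the equivalent manual wiring (illustrative):
def _sketch_adopt_ogb_split(ogb_ds):
    split = ogb_ds.get_idx_split()
    return split["train"], split["valid"], split["test"]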


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_linkpred():
    # create
    ds = data.AsLinkPredDataset(
        data.CoraGraphDataset(),
        split_ratio=[0.8, 0.1, 0.1],
        neg_ratio=1,
        verbose=True,
    )
    # Cora has 10556 edges; the 10% test split yields 1057 positive edges
    assert ds.test_edges[0][0].shape[0] == 1057
    # the number of negative samples is not guaranteed, so assert a relaxed range
    assert 1000 <= ds.test_edges[1][0].shape[0] <= 1057
    # read from cache
    ds = data.AsLinkPredDataset(
        data.CoraGraphDataset(),
        split_ratio=[0.7, 0.1, 0.2],
        neg_ratio=2,
        verbose=True,
    )
    assert ds.test_edges[0][0].shape[0] == 2112
    # the negative sample count is not guaranteed to match neg_ratio=2, so assert a relaxed range
    assert 4000 < ds.test_edges[1][0].shape[0] <= 4224
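

# The indexing above implies the layout of the link-prediction split:
# test_edges is a pair ((pos_src, pos_dst), (neg_src, neg_dst)), so
# [0][0] counts positive source nodes and [1][0] negative ones. A sketch
# of unpacking it (illustrative):
def _sketch_count_test_edges(ds):
    (pos_src, _), (neg_src, _) = ds.test_edges
    return pos_src.shape[0], neg_src.shape[0]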


@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="ogb only supports pytorch"
)
def test_as_linkpred_ogb():
    from ogb.linkproppred import DglLinkPropPredDataset

    ds = data.AsLinkPredDataset(
        DglLinkPropPredDataset("ogbl-collab"), split_ratio=None, verbose=True
    )
    # original dataset has 46329 test edges
    assert ds.test_edges[0][0].shape[0] == 46329
    # force generating a new split
    ds = data.AsLinkPredDataset(
        DglLinkPropPredDataset("ogbl-collab"),
        split_ratio=[0.7, 0.2, 0.1],
        verbose=True,
    )
    assert ds.test_edges[0][0].shape[0] == 235812


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
@unittest.skipIf(
    dgl.backend.backend_name == "tensorflow", reason="Skip Tensorflow"
)
def test_as_nodepred_csvdataset():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path = os.path.join(test_dir, "test_edges.csv")
        nodes_csv_path = os.path.join(test_dir, "test_nodes.csv")
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [{"file_name": os.path.basename(nodes_csv_path)}],
            "edge_data": [{"file_name": os.path.basename(edges_csv_path)}],
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_dims = 3
        num_classes = num_nodes
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.arange(num_classes)
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
            }
        )
        df.to_csv(nodes_csv_path, index=False)
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
            }
        )
        df.to_csv(edges_csv_path, index=False)

        ds = data.CSVDataset(test_dir, force_reload=True)
        assert "feat" in ds[0].ndata
        assert "label" in ds[0].ndata
        assert "train_mask" not in ds[0].ndata
        assert not hasattr(ds[0], "num_classes")
        new_ds = data.AsNodePredDataset(
            ds, split_ratio=[0.8, 0.1, 0.1], force_reload=True
        )
        assert new_ds.num_classes == num_classes
        assert "feat" in new_ds[0].ndata
        assert "label" in new_ds[0].ndata
        assert "train_mask" in new_ds[0].ndata


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_graphpred_reprocess():
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)
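

# Every dataset above runs the same create / cache-hit / cache-invalidate
# cycle. A compact sketch of that cycle (an illustrative factoring of the
# repetition, not a replacement for the explicit cases):
def _sketch_graphpred_reprocess_cycle(make_ds):
    for ratio, frac in (
        ([0.8, 0.1, 0.1], 0.8),  # create
        ([0.8, 0.1, 0.1], 0.8),  # read from cache
        ([0.1, 0.1, 0.8], 0.1),  # invalid cache, re-read
    ):
        ds = data.AsGraphPredDataset(make_ds(), ratio)
        assert len(ds.train_idx) == int(len(ds) * frac)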


@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="ogb only supports pytorch"
)
def test_as_graphpred_ogb():
    from ogb.graphproppred import DglGraphPropPredDataset

    ds = data.AsGraphPredDataset(
        DglGraphPropPredDataset("ogbg-molhiv"), split_ratio=None, verbose=True
    )
    assert len(ds.train_idx) == 32901
    # force generating a new split
    ds = data.AsGraphPredDataset(
        DglGraphPropPredDataset("ogbg-molhiv"),
        split_ratio=[0.6, 0.2, 0.2],
        verbose=True,
    )
    assert len(ds.train_idx) == 24676


if __name__ == "__main__":
    test_minigc()
    test_gin()
    test_data_hash()
    test_tudataset_regression()
    test_fraud()
    test_fakenews()
    test_csvdataset()
    test_as_nodepred1()
    test_as_nodepred2()
    test_as_nodepred_csvdataset()