import gzip
import io
import os
import tarfile
import tempfile
import unittest
import warnings

import backend as F

import dgl
import dgl.data as data
import numpy as np

import pandas as pd
import pytest
import yaml
from dgl import DGLError


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_minigc():
    ds = data.MiniGCDataset(16, 10, 20)
    g, l = list(zip(*ds))
    print(g, l)

    g1 = ds[0][0]
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.MiniGCDataset(16, 10, 20, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
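

# The assertion above relies on dgl.AddSelfLoop(allow_duplicate=True) adding
# exactly one self-loop per node, so the edge count grows by num_nodes. A
# minimal standalone sketch of that contract on a toy graph (underscore
# prefix keeps it out of pytest collection):
def _add_self_loop_sketch():
    g = dgl.graph(([0, 1], [1, 2]))  # 3 nodes, 2 edges
    g2 = dgl.AddSelfLoop(allow_duplicate=True)(g)
    assert g2.num_edges() == g.num_edges() + g.num_nodes()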


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_gin():
    ds_n_graphs = {
        "MUTAG": 188,
        "IMDBBINARY": 1000,
        "IMDBMULTI": 1500,
        "PROTEINS": 1113,
        "PTC": 344,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for name, n_graphs in ds_n_graphs.items():
        ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False)
        assert len(ds) == n_graphs, (len(ds), name)
        g1 = ds[0][0]
        ds = data.GINDataset(
            name, self_loop=False, degree_as_nlabel=False, transform=transform
        )
        g2 = ds[0][0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == ds.gclasses
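

# GINDataset yields (graph, label) pairs; a minimal iteration sketch
# (assumes the MUTAG files are available locally, as for the test above):
def _gin_iteration_sketch():
    ds = data.GINDataset("MUTAG", self_loop=False)
    for g, label in ds:
        assert g.num_nodes() > 0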


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_fraud():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    g = data.FraudDataset("amazon")[0]
    assert g.num_nodes() == 11944
    num_edges1 = g.num_edges()
    g2 = data.FraudDataset("amazon", transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - num_edges1 == g.num_nodes() * 3

    g = data.FraudAmazonDataset()[0]
    assert g.num_nodes() == 11944
    g2 = data.FraudAmazonDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3

    g = data.FraudYelpDataset()[0]
    assert g.num_nodes() == 45954
    g2 = data.FraudYelpDataset(transform=transform)[0]
    # 3 edge types
    assert g2.num_edges() - g.num_edges() == g.num_nodes() * 3
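

# The "* 3" above reflects how AddSelfLoop treats heterographs: one loop per
# node for every relation whose source and destination types match, and the
# fraud graphs have three such relations. A toy two-relation sketch (not part
# of the test suite):
def _hetero_add_self_loop_sketch():
    g = dgl.heterograph(
        {
            ("user", "follows", "user"): ([0], [1]),
            ("user", "blocks", "user"): ([1], [0]),
        }
    )
    g2 = dgl.AddSelfLoop(allow_duplicate=True)(g)
    assert g2.num_edges() - g.num_edges() == g.num_nodes("user") * 2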


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_tudataset_regression():
    ds = data.TUDataset("ZINC_test", force_reload=True)
    assert ds.num_classes == ds.num_labels
    assert len(ds) == 5000
    g = ds[0][0]

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    ds = data.TUDataset("ZINC_test", force_reload=True, transform=transform)
    g2 = ds[0][0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_data_hash():
    class HashTestDataset(data.DGLDataset):
        def __init__(self, hash_key=()):
            super(HashTestDataset, self).__init__("hashtest", hash_key=hash_key)

        def _load(self):
            pass

    a = HashTestDataset((True, 0, "1", (1, 2, 3)))
    b = HashTestDataset((True, 0, "1", (1, 2, 3)))
    c = HashTestDataset((True, 0, "1", (1, 2, 4)))
    assert a.hash == b.hash
    assert a.hash != c.hash


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_citation_graph():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    # Cora
    g = data.CoraGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 2708
    assert g.num_edges() == 10556
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Citeseer
    g = data.CiteseerGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 3327
    assert g.num_edges() == 9228
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CiteseerGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # Pubmed
    g = data.PubmedGraphDataset(force_reload=True, reorder=True)[0]
    assert g.num_nodes() == 19717
    assert g.num_edges() == 88651
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.PubmedGraphDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()
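

# Beyond sizes, the citation datasets ship standard split masks in ndata;
# a minimal usage sketch (the mask keys below are the documented ones):
def _citation_split_sketch():
    g = data.CoraGraphDataset()[0]
    for key in ["train_mask", "val_mask", "test_mask"]:
        assert F.asnumpy(g.ndata[key]).sum() > 0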


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_gnn_benchmark():
    transform = dgl.AddSelfLoop(allow_duplicate=True)

    # AmazonCoBuyComputerDataset
    g = data.AmazonCoBuyComputerDataset()[0]
    assert g.num_nodes() == 13752
    assert g.num_edges() == 491722
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyComputerDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # AmazonCoBuyPhotoDataset
    g = data.AmazonCoBuyPhotoDataset()[0]
    assert g.num_nodes() == 7650
    assert g.num_edges() == 238163
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.AmazonCoBuyPhotoDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorPhysicsDataset
    g = data.CoauthorPhysicsDataset()[0]
    assert g.num_nodes() == 34493
    assert g.num_edges() == 495924
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorPhysicsDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoauthorCSDataset
    g = data.CoauthorCSDataset()[0]
    assert g.num_nodes() == 18333
    assert g.num_edges() == 163788
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoauthorCSDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()

    # CoraFullDataset
    g = data.CoraFullDataset()[0]
    assert g.num_nodes() == 19793
    assert g.num_edges() == 126842
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))
    g2 = data.CoraFullDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()
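

# The five blocks above repeat one pattern; a table-driven sketch of the
# same node/edge-count checks (sizes copied from the asserts above):
def _gnn_benchmark_table_sketch():
    expected = {
        data.AmazonCoBuyComputerDataset: (13752, 491722),
        data.AmazonCoBuyPhotoDataset: (7650, 238163),
        data.CoauthorPhysicsDataset: (34493, 495924),
        data.CoauthorCSDataset: (18333, 163788),
        data.CoraFullDataset: (19793, 126842),
    }
    for cls, (n_nodes, n_edges) in expected.items():
        g = cls()[0]
        assert (g.num_nodes(), g.num_edges()) == (n_nodes, n_edges)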


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_explain_syn():
    dataset = data.BAShapeDataset()
    assert dataset.num_classes == 4
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.BAShapeDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.BAShapeDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.BACommunityDataset()
    assert dataset.num_classes == 8
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.BACommunityDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.BACommunityDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.TreeCycleDataset()
    assert dataset.num_classes == 2
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.TreeCycleDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.TreeCycleDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.TreeGridDataset()
    assert dataset.num_classes == 2
    g = dataset[0]
    assert "label" in g.ndata
    assert "feat" in g.ndata

    g1 = data.TreeGridDataset(force_reload=True, seed=0)[0]
    src1, dst1 = g1.edges()
    g2 = data.TreeGridDataset(force_reload=True, seed=0)[0]
    src2, dst2 = g2.edges()
    assert F.allclose(src1, src2)
    assert F.allclose(dst1, dst2)

    dataset = data.BA2MotifDataset()
    assert dataset.num_classes == 2
    g, label = dataset[0]
    assert "feat" in g.ndata


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_wiki_cs():
    g = data.WikiCSDataset()[0]
    assert g.num_nodes() == 11701
    assert g.num_edges() == 431726
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.WikiCSDataset(transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skip(reason="Dataset too large to download for the latest CI.")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_yelp():
    g = data.YelpDataset(reorder=True)[0]
    assert g.num_nodes() == 716847
    assert g.num_edges() == 13954819
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.YelpDataset(reorder=True, transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_flickr():
    g = data.FlickrDataset(reorder=True)[0]
    assert g.num_nodes() == 89250
    assert g.num_edges() == 899756
    dst = F.asnumpy(g.edges()[1])
    assert np.array_equal(dst, np.sort(dst))

    transform = dgl.AddSelfLoop(allow_duplicate=True)
    g2 = data.FlickrDataset(reorder=True, transform=transform)[0]
    assert g2.num_edges() - g.num_edges() == g.num_nodes()


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_pattern():
    mode_n_graphs = {
        "train": 10000,
        "valid": 2000,
        "test": 2000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        ds = data.PATTERNDataset(mode=mode)
        assert len(ds) == n_graphs, (len(ds), mode)
        g1 = ds[0]
        ds = data.PATTERNDataset(mode=mode, transform=transform)
        g2 = ds[0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == 2


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_cluster():
    mode_n_graphs = {
        "train": 10000,
        "valid": 1000,
        "test": 1000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        ds = data.CLUSTERDataset(mode=mode)
        assert len(ds) == n_graphs, (len(ds), mode)
        g1 = ds[0]
        ds = data.CLUSTERDataset(mode=mode, transform=transform)
        g2 = ds[0]
        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        assert ds.num_classes == 6


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(
    dgl.backend.backend_name != "pytorch", reason="only supports pytorch"
)
def test_zinc():
    mode_n_graphs = {
        "train": 10000,
        "valid": 1000,
        "test": 1000,
    }
    transform = dgl.AddSelfLoop(allow_duplicate=True)
    for mode, n_graphs in mode_n_graphs.items():
        dataset1 = data.ZINCDataset(mode=mode)
        g1, label = dataset1[0]
        dataset2 = data.ZINCDataset(mode=mode, transform=transform)
        g2, _ = dataset2[0]

        assert g2.num_edges() - g1.num_edges() == g1.num_nodes()
        # the label should be a scalar tensor (empty shape)
        assert not label.shape


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_extract_archive():
    # gzip
    with tempfile.TemporaryDirectory() as src_dir:
        gz_file = "gz_archive"
        gz_path = os.path.join(src_dir, gz_file + ".gz")
        content = b"test extract archive gzip"
        with gzip.open(gz_path, "wb") as f:
            f.write(content)
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(gz_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, gz_file))

    # tar
    with tempfile.TemporaryDirectory() as src_dir:
        tar_file = "tar_archive"
        tar_path = os.path.join(src_dir, tar_file + ".tar")
        # str.encode() defaults to UTF-8
        content = "test extract archive tar\n".encode()
        info = tarfile.TarInfo(name="tar_archive")
        info.size = len(content)
        with tarfile.open(tar_path, "w") as f:
            f.addfile(info, io.BytesIO(content))
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(tar_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, tar_file))
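

# extract_archive also accepts .zip files; a sketch mirroring the gzip/tar
# cases above, assuming .zip stays among the supported formats (zipfile is
# in the standard library):
def _extract_zip_sketch():
    import zipfile

    with tempfile.TemporaryDirectory() as src_dir:
        zip_path = os.path.join(src_dir, "zip_archive.zip")
        with zipfile.ZipFile(zip_path, "w") as zf:
            zf.writestr("payload.txt", "test extract archive zip")
        with tempfile.TemporaryDirectory() as dst_dir:
            data.utils.extract_archive(zip_path, dst_dir, overwrite=True)
            assert os.path.exists(os.path.join(dst_dir, "payload.txt"))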


def _test_construct_graphs_node_ids():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    num_nodes = 100
    num_edges = 1000

    # node IDs are required to be unique; sampling num_nodes IDs from only
    # half as many distinct values guarantees duplicates, so construction
    # must fail
    node_ids = np.random.choice(np.arange(num_nodes // 2), num_nodes)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    node_data = NodeData(node_ids, {})
    edge_data = EdgeData(src_ids, dst_ids, {})
    expect_except = False
    try:
        _, _ = DGLGraphConstructor.construct_graphs(node_data, edge_data)
    except Exception:
        expect_except = True
    assert expect_except

    # node IDs already cover 0..num_nodes-1, just in shuffled order
    node_ids = np.arange(num_nodes)
    np.random.shuffle(node_ids)
    _, idx = np.unique(node_ids, return_index=True)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    node_feat = np.random.rand(num_nodes, 3)
    node_data = NodeData(node_ids, {"feat": node_feat})
    edge_data = EdgeData(src_ids, dst_ids, {})
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == len(node_ids)
    assert g.num_edges() == len(src_ids)
    assert F.array_equal(
        F.tensor(node_feat[idx], dtype=F.float32), g.ndata["feat"]
    )

    # node IDs are a mix of numeric and non-numeric values
    # homogeneous graph
    node_ids = [1, 2, 3, "a"]
    src_ids = [1, 2, 3]
    dst_ids = ["a", 1, 2]
    node_data = NodeData(node_ids, {})
    edge_data = EdgeData(src_ids, dst_ids, {})
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == len(node_ids)
    assert g.num_edges() == len(src_ids)

    # heterogeneous graph
    node_ids_user = [1, 2, 3]
    node_ids_item = ["a", "b", "c"]
    src_ids = node_ids_user
    dst_ids = node_ids_item
    node_data_user = NodeData(node_ids_user, {}, type="user")
    node_data_item = NodeData(node_ids_item, {}, type="item")
    edge_data = EdgeData(src_ids, dst_ids, {}, type=("user", "like", "item"))
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        [node_data_user, node_data_item], edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert not g.is_homogeneous
    assert g.num_nodes("user") == len(node_ids_user)
    assert g.num_nodes("item") == len(node_ids_item)
    assert g.num_edges() == len(src_ids)


def _test_construct_graphs_homo():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    # node_id could be non-sorted, non-numeric.
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    node_ids = np.random.choice(
        np.arange(num_nodes * 2), size=num_nodes, replace=False
    )
    assert len(node_ids) == num_nodes
    # to be non-sorted
    np.random.shuffle(node_ids)
    # to be non-numeric
    node_ids = ["id_{}".format(id) for id in node_ids]
    t_ndata = {
        "feat": np.random.rand(num_nodes, num_dims),
        "label": np.random.randint(2, size=num_nodes),
    }
    _, u_indices = np.unique(node_ids, return_index=True)
    ndata = {
        "feat": t_ndata["feat"][u_indices],
        "label": t_ndata["label"][u_indices],
    }
    node_data = NodeData(node_ids, t_ndata)
    src_ids = np.random.choice(node_ids, size=num_edges)
    dst_ids = np.random.choice(node_ids, size=num_edges)
    edata = {
        "feat": np.random.rand(num_edges, num_dims),
        "label": np.random.randint(2, size=num_edges),
    }
    edge_data = EdgeData(src_ids, dst_ids, edata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert g.is_homogeneous
    assert g.num_nodes() == num_nodes
    assert g.num_edges() == num_edges

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.dtype(rhs[key]) != F.float64
            assert F.array_equal(
                F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
            )

    assert_data(ndata, g.ndata)
    assert_data(edata, g.edata)


def _test_construct_graphs_hetero():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        NodeData,
    )

    # node_id/src_id/dst_id could be non-sorted, duplicated, non-numeric.
    num_nodes = 100
    num_edges = 1000
    num_dims = 3
    ntypes = ["user", "item"]
    node_data = []
    node_ids_dict = {}
    ndata_dict = {}
    for ntype in ntypes:
        node_ids = np.random.choice(
            np.arange(num_nodes * 2), size=num_nodes, replace=False
        )
        assert len(node_ids) == num_nodes
        # to be non-sorted
        np.random.shuffle(node_ids)
        # to be non-numeric
        node_ids = ["id_{}".format(id) for id in node_ids]
        t_ndata = {
            "feat": np.random.rand(num_nodes, num_dims),
            "label": np.random.randint(2, size=num_nodes),
        }
        _, u_indices = np.unique(node_ids, return_index=True)
        ndata = {
            "feat": t_ndata["feat"][u_indices],
            "label": t_ndata["label"][u_indices],
        }
        node_data.append(NodeData(node_ids, t_ndata, type=ntype))
        node_ids_dict[ntype] = node_ids
        ndata_dict[ntype] = ndata
    etypes = [("user", "follow", "user"), ("user", "like", "item")]
    edge_data = []
    edata_dict = {}
    for src_type, e_type, dst_type in etypes:
        src_ids = np.random.choice(node_ids_dict[src_type], size=num_edges)
        dst_ids = np.random.choice(node_ids_dict[dst_type], size=num_edges)
        edata = {
            "feat": np.random.rand(num_edges, num_dims),
            "label": np.random.randint(2, size=num_edges),
        }
        edge_data.append(
            EdgeData(src_ids, dst_ids, edata, type=(src_type, e_type, dst_type))
        )
        edata_dict[(src_type, e_type, dst_type)] = edata
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data
    )
    assert len(graphs) == 1
    assert len(data_dict) == 0
    g = graphs[0]
    assert not g.is_homogeneous
    assert g.num_nodes() == num_nodes * len(ntypes)
    assert g.num_edges() == num_edges * len(etypes)

    def assert_data(lhs, rhs):
        for key, value in lhs.items():
            assert key in rhs
            assert F.dtype(rhs[key]) != F.float64
            assert F.array_equal(
                F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
            )

    for ntype in g.ntypes:
        assert g.num_nodes(ntype) == num_nodes
        assert_data(ndata_dict[ntype], g.nodes[ntype].data)
    for etype in g.canonical_etypes:
        assert g.num_edges(etype) == num_edges
        assert_data(edata_dict[etype], g.edges[etype].data)


def _test_construct_graphs_multiple():
    from dgl.data.csv_dataset_base import (
        DGLGraphConstructor,
        EdgeData,
        GraphData,
        NodeData,
    )

    num_nodes = 100
    num_edges = 1000
    num_graphs = 10
    num_dims = 3
    node_ids = np.array([], dtype=int)
    src_ids = np.array([], dtype=int)
    dst_ids = np.array([], dtype=int)
    ngraph_ids = np.array([], dtype=int)
    egraph_ids = np.array([], dtype=int)
    u_indices = np.array([], dtype=int)
    for i in range(num_graphs):
        l_node_ids = np.random.choice(
            np.arange(num_nodes * 2), size=num_nodes, replace=False
        )
        node_ids = np.append(node_ids, l_node_ids)
        _, l_u_indices = np.unique(l_node_ids, return_index=True)
        u_indices = np.append(u_indices, l_u_indices)
        ngraph_ids = np.append(ngraph_ids, np.full(num_nodes, i))
        src_ids = np.append(
            src_ids, np.random.choice(l_node_ids, size=num_edges)
        )
        dst_ids = np.append(
            dst_ids, np.random.choice(l_node_ids, size=num_edges)
        )
        egraph_ids = np.append(egraph_ids, np.full(num_edges, i))
    ndata = {
        "feat": np.random.rand(num_nodes * num_graphs, num_dims),
        "label": np.random.randint(2, size=num_nodes * num_graphs),
    }
    ngraph_ids = ["graph_{}".format(id) for id in ngraph_ids]
    node_data = NodeData(node_ids, ndata, graph_id=ngraph_ids)
    egraph_ids = ["graph_{}".format(id) for id in egraph_ids]
    edata = {
        "feat": np.random.rand(num_edges * num_graphs, num_dims),
        "label": np.random.randint(2, size=num_edges * num_graphs),
    }
    edge_data = EdgeData(src_ids, dst_ids, edata, graph_id=egraph_ids)
    gdata = {
        "feat": np.random.rand(num_graphs, num_dims),
        "label": np.random.randint(2, size=num_graphs),
    }
    graph_ids = ["graph_{}".format(id) for id in np.arange(num_graphs)]
    graph_data = GraphData(graph_ids, gdata)
    graphs, data_dict = DGLGraphConstructor.construct_graphs(
        node_data, edge_data, graph_data
    )
    assert len(graphs) == num_graphs
    assert len(data_dict) == len(gdata)
    for k, v in data_dict.items():
        assert F.dtype(v) != F.float64
        assert F.array_equal(
            F.reshape(F.tensor(gdata[k], dtype=F.dtype(v)), (len(graphs), -1)),
            v,
        )
    for i, g in enumerate(graphs):
        assert g.is_homogeneous
        assert g.num_nodes() == num_nodes
        assert g.num_edges() == num_edges

        def assert_data(lhs, rhs, size, node=False):
            for key, value in lhs.items():
                assert key in rhs
                value = value[i * size : (i + 1) * size]
                if node:
                    indices = u_indices[i * size : (i + 1) * size]
                    value = value[indices]
                assert F.dtype(rhs[key]) != F.float64
                assert F.array_equal(
                    F.tensor(value, dtype=F.dtype(rhs[key])), rhs[key]
                )

        assert_data(ndata, g.ndata, num_nodes, node=True)
        assert_data(edata, g.edata, num_edges)

    # Graph IDs found in node/edge CSV but not in graph CSV
    graph_data = GraphData(np.arange(num_graphs - 2), {})
    expect_except = False
    try:
        _, _ = DGLGraphConstructor.construct_graphs(
            node_data, edge_data, graph_data
        )
    except Exception:
        expect_except = True
    assert expect_except


def _get_data_table(data_frame, save_index=False):
    from dgl.data.csv_dataset_base import DefaultDataParser

    with tempfile.TemporaryDirectory() as test_dir:
        csv_path = os.path.join(test_dir, "nodes.csv")
        data_frame.to_csv(csv_path, index=save_index)
        dp = DefaultDataParser()
        df = pd.read_csv(csv_path)

    # Suppress the "Untitled column found. Ignored..." warning, which is
    # raised when the CSV file was saved with an index:
    #    data_frame.to_csv(csv_path, index=True)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        return dp(df)


def _test_DefaultDataParser():
    # common csv
    num_nodes = 5
    num_labels = 3
    num_dims = 2
    node_id = np.arange(num_nodes)
    label = np.random.randint(num_labels, size=num_nodes)
    feat = np.random.rand(num_nodes, num_dims)
    df = pd.DataFrame(
        {
            "node_id": node_id,
            "label": label,
            "feat": [line.tolist() for line in feat],
        }
    )

    dt = _get_data_table(df)
    assert np.array_equal(node_id, dt["node_id"])
    assert np.array_equal(label, dt["label"])
    assert np.array_equal(feat, dt["feat"])

    # string labels consisting of non-numeric values trigger an exception
    df = pd.DataFrame({"label": ["a", "b", "c"]})
    expect_except = False
    try:
        _get_data_table(df)
    except Exception:
        expect_except = True
    assert expect_except

    # the CSV has an index column, which is ignored because it is unnamed
    df = pd.DataFrame({"label": [1, 2, 3]})
    dt = _get_data_table(df, True)
    assert len(dt) == 1


def _test_load_yaml_with_sanity_check():
    from dgl.data.csv_dataset_base import load_yaml_with_sanity_check

    with tempfile.TemporaryDirectory() as test_dir:
        yaml_path = os.path.join(test_dir, "meta.yaml")
        # workable, though usually meaningless
        yaml_data = {
            "dataset_name": "default",
            "node_data": [],
            "edge_data": [],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert meta.version == "1.0.0"
        assert meta.dataset_name == "default"
        assert meta.separator == ","
        assert len(meta.node_data) == 0
        assert len(meta.edge_data) == 0
        assert meta.graph_data is None

        # minimum with required fields only
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes.csv"}],
            "edge_data": [{"file_name": "edges.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        for ndata in meta.node_data:
            assert ndata.file_name == "nodes.csv"
            assert ndata.ntype == "_V"
            assert ndata.graph_id_field == "graph_id"
            assert ndata.node_id_field == "node_id"
        for edata in meta.edge_data:
            assert edata.file_name == "edges.csv"
            assert edata.etype == ["_V", "_E", "_V"]
            assert edata.graph_id_field == "graph_id"
            assert edata.src_id_field == "src_id"
            assert edata.dst_id_field == "dst_id"

        # optional fields are specified
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "separator": "|",
            "node_data": [
                {
                    "file_name": "nodes.csv",
                    "ntype": "user",
                    "graph_id_field": "xxx",
                    "node_id_field": "xxx",
                }
            ],
            "edge_data": [
                {
                    "file_name": "edges.csv",
                    "etype": ["user", "follow", "user"],
                    "graph_id_field": "xxx",
                    "src_id_field": "xxx",
                    "dst_id_field": "xxx",
                }
            ],
            "graph_data": {"file_name": "graph.csv", "graph_id_field": "xxx"},
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        meta = load_yaml_with_sanity_check(yaml_path)
        assert len(meta.node_data) == 1
        ndata = meta.node_data[0]
        assert ndata.ntype == "user"
        assert ndata.graph_id_field == "xxx"
        assert ndata.node_id_field == "xxx"
        assert len(meta.edge_data) == 1
        edata = meta.edge_data[0]
        assert edata.etype == ["user", "follow", "user"]
        assert edata.graph_id_field == "xxx"
        assert edata.src_id_field == "xxx"
        assert edata.dst_id_field == "xxx"
        assert meta.graph_data is not None
        assert meta.graph_data.file_name == "graph.csv"
        assert meta.graph_data.graph_id_field == "xxx"

        # some required fields are missing
        yaml_data = {
            "dataset_name": "default",
            "node_data": [],
            "edge_data": [],
        }
        for field in yaml_data.keys():
            ydata = {k: v for k, v in yaml_data.items()}
            ydata.pop(field)
            with open(yaml_path, "w") as f:
                yaml.dump(ydata, f, sort_keys=False)
            expect_except = False
            try:
                meta = load_yaml_with_sanity_check(yaml_path)
            except Exception:
                expect_except = True
            assert expect_except

        # unsupported version
        yaml_data = {
            "version": "0.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes_0.csv"}],
            "edge_data": [{"file_name": "edges_0.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except

        # duplicate node types
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [
                {"file_name": "nodes.csv"},
                {"file_name": "nodes.csv"},
            ],
            "edge_data": [{"file_name": "edges.csv"}],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except

        # duplicate edge types
        yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default",
            "node_data": [{"file_name": "nodes.csv"}],
            "edge_data": [
                {"file_name": "edges.csv"},
                {"file_name": "edges.csv"},
            ],
        }
        with open(yaml_path, "w") as f:
            yaml.dump(yaml_data, f, sort_keys=False)
        expect_except = False
        try:
            meta = load_yaml_with_sanity_check(yaml_path)
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_node_data_from_csv():
    from dgl.data.csv_dataset_base import DefaultDataParser, MetaNode, NodeData

    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        # minimum
        df = pd.DataFrame({"node_id": np.arange(num_nodes)})
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": np.random.randint(3, size=num_nodes),
            }
        )
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df["label"], node_data.data["label"])
        assert np.array_equal(np.full(num_nodes, 0), node_data.graph_id)
        assert node_data.type == "_V"

        # add more fields into nodes.csv
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": np.random.randint(3, size=num_nodes),
                "graph_id": np.full(num_nodes, 1),
            }
        )
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        node_data = NodeData.load_from_csv(meta_node, DefaultDataParser())
        assert np.array_equal(df["node_id"], node_data.id)
        assert len(node_data.data) == 1
        assert np.array_equal(df["label"], node_data.data["label"])
        assert np.array_equal(df["graph_id"], node_data.graph_id)
        assert node_data.type == "_V"

        # required header is missing
        df = pd.DataFrame({"label": np.random.randint(3, size=num_nodes)})
        csv_path = os.path.join(test_dir, "nodes.csv")
        df.to_csv(csv_path, index=False)
        meta_node = MetaNode(file_name=csv_path)
        expect_except = False
        try:
            NodeData.load_from_csv(meta_node, DefaultDataParser())
        except Exception:
            expect_except = True
        assert expect_except


def _test_load_edge_data_from_csv():
    from dgl.data.csv_dataset_base import DefaultDataParser, EdgeData, MetaEdge

    with tempfile.TemporaryDirectory() as test_dir:
        num_nodes = 100
        num_edges = 1000
        # minimum
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "label": np.random.randint(3, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 1
        assert np.array_equal(df["label"], edge_data.data["label"])
        assert np.array_equal(np.full(num_edges, 0), edge_data.graph_id)
        assert edge_data.type == ("_V", "_E", "_V")

        # add more fields into edges.csv
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "graph_id": np.arange(num_edges),
                "feat": np.random.randint(3, size=num_edges),
                "label": np.random.randint(3, size=num_edges),
            }
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        edge_data = EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        assert np.array_equal(df["src_id"], edge_data.src)
        assert np.array_equal(df["dst_id"], edge_data.dst)
        assert len(edge_data.data) == 2
        assert np.array_equal(df["feat"], edge_data.data["feat"])
        assert np.array_equal(df["label"], edge_data.data["label"])
        assert np.array_equal(df["graph_id"], edge_data.graph_id)
        assert edge_data.type == ("_V", "_E", "_V")

        # required headers are missing
        df = pd.DataFrame(
            {"src_id": np.random.randint(num_nodes, size=num_edges)}
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        expect_except = False
        try:
            EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except

        df = pd.DataFrame(
            {"dst_id": np.random.randint(num_nodes, size=num_edges)}
        )
        csv_path = os.path.join(test_dir, "edges.csv")
        df.to_csv(csv_path, index=False)
        meta_edge = MetaEdge(file_name=csv_path)
        expect_except = False
        try:
            EdgeData.load_from_csv(meta_edge, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_load_graph_data_from_csv():
    from dgl.data.csv_dataset_base import (
        DefaultDataParser,
        GraphData,
        MetaGraph,
    )

    with tempfile.TemporaryDirectory() as test_dir:
        num_graphs = 100
        # minimum
        df = pd.DataFrame({"graph_id": np.arange(num_graphs)})
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 0

        # common case
        df = pd.DataFrame(
            {
                "graph_id": np.arange(num_graphs),
                "label": np.random.randint(3, size=num_graphs),
            }
        )
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 1
        assert np.array_equal(df["label"], graph_data.data["label"])

        # add more fields into graph.csv
        df = pd.DataFrame(
            {
                "graph_id": np.arange(num_graphs),
                "feat": np.random.randint(3, size=num_graphs),
                "label": np.random.randint(3, size=num_graphs),
            }
        )
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        graph_data = GraphData.load_from_csv(meta_graph, DefaultDataParser())
        assert np.array_equal(df["graph_id"], graph_data.graph_id)
        assert len(graph_data.data) == 2
        assert np.array_equal(df["feat"], graph_data.data["feat"])
        assert np.array_equal(df["label"], graph_data.data["label"])

        # required header is missing
        df = pd.DataFrame({"label": np.random.randint(3, size=num_graphs)})
        csv_path = os.path.join(test_dir, "graph.csv")
        df.to_csv(csv_path, index=False)
        meta_graph = MetaGraph(file_name=csv_path)
        expect_except = False
        try:
            GraphData.load_from_csv(meta_graph, DefaultDataParser())
        except DGLError:
            expect_except = True
        assert expect_except


def _test_CSVDataset_single():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_dims = 3
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes)
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
            }
        )
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        feat_edata = np.random.rand(num_edges, num_dims)
        label_edata = np.random.randint(2, size=num_edges)
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
                "label": label_edata,
                "feat": [line.tolist() for line in feat_edata],
            }
        )
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)

        # load CSVDataset
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
            csv_dataset = data.CSVDataset(test_dir, force_reload=force_reload)
            assert len(csv_dataset) == 1
            g = csv_dataset[0]
            assert not g.is_homogeneous
            assert csv_dataset.has_cache()
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                assert F.array_equal(
                    F.tensor(feat_ndata, dtype=F.float32),
                    g.nodes[ntype].data["feat"],
                )
                assert np.array_equal(
                    label_ndata, F.asnumpy(g.nodes[ntype].data["label"])
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                assert F.array_equal(
                    F.tensor(feat_edata, dtype=F.float32),
                    g.edges[etype].data["feat"],
                )
                assert np.array_equal(
                    label_edata, F.asnumpy(g.edges[etype].data["label"])
                )
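

# A sketch of the on-disk layout that data.CSVDataset consumes in the test
# above (file names match the ones generated there):
#
#   test_dir/
#       meta.yaml           # version, dataset_name, node_data, edge_data
#       test_nodes_0.csv    # node_id, label, feat
#       test_nodes_1.csv
#       test_edges_0.csv    # src_id, dst_id, label, feat
#       test_edges_1.csv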


1252
def _test_CSVDataset_multiple():
1253
1254
1255
1256
1257
1258
1259
1260
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
            "graph_data": {"file_name": os.path.basename(graph_csv_path)},
        }
        with open(meta_yaml_path, "w") as f:
1287
1288
1289
1290
1291
            yaml.dump(meta_yaml_data, f, sort_keys=False)
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        num_dims = 3
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
        feat_ndata = np.random.rand(num_nodes * num_graphs, num_dims)
        label_ndata = np.random.randint(2, size=num_nodes * num_graphs)
        df = pd.DataFrame(
            {
                "node_id": np.hstack(
                    [np.arange(num_nodes) for _ in range(num_graphs)]
                ),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
                "graph_id": np.hstack(
                    [np.full(num_nodes, i) for i in range(num_graphs)]
                ),
            }
        )
1306
1307
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
        feat_edata = np.random.rand(num_edges * num_graphs, num_dims)
        label_edata = np.random.randint(2, size=num_edges * num_graphs)
        df = pd.DataFrame(
            {
                "src_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "dst_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "label": label_edata,
                "feat": [line.tolist() for line in feat_edata],
                "graph_id": np.hstack(
                    [np.full(num_edges, i) for i in range(num_graphs)]
                ),
            }
        )
1331
1332
1333
1334
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        feat_gdata = np.random.rand(num_graphs, num_dims)
        label_gdata = np.random.randint(2, size=num_graphs)
1335
1336
1337
1338
1339
1340
1341
        df = pd.DataFrame(
            {
                "label": label_gdata,
                "feat": [line.tolist() for line in feat_gdata],
                "graph_id": np.arange(num_graphs),
            }
        )
1342
1343
        df.to_csv(graph_csv_path, index=False)

1344
        # load CSVDataset with default node/edge/gdata_parser
1345
1346
1347
1348
1349
        for force_reload in [True, False]:
            if not force_reload:
                # remove original node data file to verify reload from cached files
                os.remove(nodes_csv_path_0)
                assert not os.path.exists(nodes_csv_path_0)
1350
            csv_dataset = data.CSVDataset(test_dir, force_reload=force_reload)
1351
1352
1353
            assert len(csv_dataset) == num_graphs
            assert csv_dataset.has_cache()
            assert len(csv_dataset.data) == 2
1354
1355
1356
1357
1358
            assert "feat" in csv_dataset.data
            assert "label" in csv_dataset.data
            assert F.array_equal(
                F.tensor(feat_gdata, dtype=F.float32), csv_dataset.data["feat"]
            )
1359
            for i, (g, g_data) in enumerate(csv_dataset):
1360
                assert not g.is_homogeneous
1361
1362
1363
1364
                assert F.asnumpy(g_data["label"]) == label_gdata[i]
                assert F.array_equal(
                    g_data["feat"], F.tensor(feat_gdata[i], dtype=F.float32)
                )
1365
1366
                for ntype in g.ntypes:
                    assert g.num_nodes(ntype) == num_nodes
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
                    assert F.array_equal(
                        F.tensor(
                            feat_ndata[i * num_nodes : (i + 1) * num_nodes],
                            dtype=F.float32,
                        ),
                        g.nodes[ntype].data["feat"],
                    )
                    assert np.array_equal(
                        label_ndata[i * num_nodes : (i + 1) * num_nodes],
                        F.asnumpy(g.nodes[ntype].data["label"]),
                    )
1378
1379
                for etype in g.etypes:
                    assert g.num_edges(etype) == num_edges
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
                    assert F.array_equal(
                        F.tensor(
                            feat_edata[i * num_edges : (i + 1) * num_edges],
                            dtype=F.float32,
                        ),
                        g.edges[etype].data["feat"],
                    )
                    assert np.array_equal(
                        label_edata[i * num_edges : (i + 1) * num_edges],
                        F.asnumpy(g.edges[etype].data["label"]),
                    )
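
        # Note: the force_reload=False pass above succeeds even though one raw
        # node CSV was deleted, because CSVDataset reloads graphs and data
        # from its cached binary files under the dataset directory.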


def _test_CSVDataset_customized_data_parser():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path_0 = os.path.join(test_dir, "test_edges_0.csv")
        edges_csv_path_1 = os.path.join(test_dir, "test_edges_1.csv")
        nodes_csv_path_0 = os.path.join(test_dir, "test_nodes_0.csv")
        nodes_csv_path_1 = os.path.join(test_dir, "test_nodes_1.csv")
        graph_csv_path = os.path.join(test_dir, "test_graph.csv")
        meta_yaml_data = {
            "dataset_name": "default_name",
            "node_data": [
                {
                    "file_name": os.path.basename(nodes_csv_path_0),
                    "ntype": "user",
                },
                {
                    "file_name": os.path.basename(nodes_csv_path_1),
                    "ntype": "item",
                },
            ],
            "edge_data": [
                {
                    "file_name": os.path.basename(edges_csv_path_0),
                    "etype": ["user", "follow", "user"],
                },
                {
                    "file_name": os.path.basename(edges_csv_path_1),
                    "etype": ["user", "like", "item"],
                },
            ],
            "graph_data": {"file_name": os.path.basename(graph_csv_path)},
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
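        # For reference, yaml.dump renders each edge_data entry with its
        # canonical etype as a YAML list, e.g.:
        #
        #   edge_data:
        #   - file_name: test_edges_0.csv
        #     etype:
        #     - user
        #     - follow
        #     - user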
        num_nodes = 100
        num_edges = 500
        num_graphs = 10
        label_ndata = np.random.randint(2, size=num_nodes * num_graphs)
        df = pd.DataFrame(
            {
                "node_id": np.hstack(
                    [np.arange(num_nodes) for _ in range(num_graphs)]
                ),
                "label": label_ndata,
                "graph_id": np.hstack(
                    [np.full(num_nodes, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(nodes_csv_path_0, index=False)
        df.to_csv(nodes_csv_path_1, index=False)
        label_edata = np.random.randint(2, size=num_edges * num_graphs)
        df = pd.DataFrame(
            {
                "src_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "dst_id": np.hstack(
                    [
                        np.random.randint(num_nodes, size=num_edges)
                        for _ in range(num_graphs)
                    ]
                ),
                "label": label_edata,
                "graph_id": np.hstack(
                    [np.full(num_edges, i) for i in range(num_graphs)]
                ),
            }
        )
        df.to_csv(edges_csv_path_0, index=False)
        df.to_csv(edges_csv_path_1, index=False)
        label_gdata = np.random.randint(2, size=num_graphs)
        df = pd.DataFrame(
            {"label": label_gdata, "graph_id": np.arange(num_graphs)}
        )
        df.to_csv(graph_csv_path, index=False)

        class CustDataParser:
            def __call__(self, df):
                data = {}
                for header in df:
                    dt = df[header].to_numpy().squeeze()
                    if header == "label":
                        dt += 2
                    data[header] = dt
                return data
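
        # A quick illustrative check of the parser on a tiny hand-made frame
        # (unrelated to the CSVs above): "label" columns are shifted by 2 and
        # every other column passes through unchanged.
        toy_df = pd.DataFrame({"label": [0, 1], "weight": [0.5, 0.7]})
        toy_parsed = CustDataParser()(toy_df)
        assert np.array_equal(toy_parsed["label"], np.array([2, 3]))
        assert np.array_equal(toy_parsed["weight"], np.array([0.5, 0.7]))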

        # load CSVDataset with customized node/edge/gdata_parser
        # specify via dict[ntype/etype, callable]
        csv_dataset = data.CSVDataset(
            test_dir,
            force_reload=True,
            ndata_parser={"user": CustDataParser()},
            edata_parser={("user", "like", "item"): CustDataParser()},
            gdata_parser=CustDataParser(),
        )
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert "label" in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2 if ntype == "user" else 0
                assert np.array_equal(
                    label_ndata[i * num_nodes : (i + 1) * num_nodes] + offset,
                    F.asnumpy(g.nodes[ntype].data["label"]),
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2 if etype == "like" else 0
                assert np.array_equal(
                    label_edata[i * num_edges : (i + 1) * num_edges] + offset,
                    F.asnumpy(g.edges[etype].data["label"]),
                )
        # specify via callable
        csv_dataset = data.CSVDataset(
            test_dir,
            force_reload=True,
            ndata_parser=CustDataParser(),
            edata_parser=CustDataParser(),
            gdata_parser=CustDataParser(),
        )
        assert len(csv_dataset) == num_graphs
        assert len(csv_dataset.data) == 1
        assert "label" in csv_dataset.data
        for i, (g, g_data) in enumerate(csv_dataset):
            assert not g.is_homogeneous
            assert F.asnumpy(g_data) == label_gdata[i] + 2
            for ntype in g.ntypes:
                assert g.num_nodes(ntype) == num_nodes
                offset = 2
                assert np.array_equal(
                    label_ndata[i * num_nodes : (i + 1) * num_nodes] + offset,
                    F.asnumpy(g.nodes[ntype].data["label"]),
                )
            for etype in g.etypes:
                assert g.num_edges(etype) == num_edges
                offset = 2
                assert np.array_equal(
                    label_edata[i * num_edges : (i + 1) * num_edges] + offset,
                    F.asnumpy(g.edges[etype].data["label"]),
                )


def _test_NodeEdgeGraphData():
    from dgl.data.csv_dataset_base import EdgeData, GraphData, NodeData

    # NodeData basics
    num_nodes = 100
    node_ids = np.arange(num_nodes, dtype=float)
    ndata = NodeData(node_ids, {})
    assert np.array_equal(ndata.id, node_ids)
    assert len(ndata.data) == 0
    assert ndata.type == "_V"
    assert np.array_equal(ndata.graph_id, np.full(num_nodes, 0))
    # NodeData with explicit type, data and graph_id
    data = {"feat": np.random.rand(num_nodes, 3)}
    graph_id = np.arange(num_nodes)
    ndata = NodeData(node_ids, data, type="user", graph_id=graph_id)
    assert ndata.type == "user"
    assert np.array_equal(ndata.graph_id, graph_id)
    assert len(ndata.data) == len(data)
    for k, v in data.items():
        assert k in ndata.data
        assert np.array_equal(ndata.data[k], v)
    # NodeData raises when id, data and graph_id lengths disagree
    expect_except = False
    try:
        NodeData(
            np.arange(num_nodes),
            {"feat": np.random.rand(num_nodes + 1, 3)},
            graph_id=np.arange(num_nodes - 1),
        )
    except Exception:
        expect_except = True
    assert expect_except

    # EdgeData basics
    num_nodes = 100
    num_edges = 1000
    src_ids = np.random.randint(num_nodes, size=num_edges)
    dst_ids = np.random.randint(num_nodes, size=num_edges)
    edata = EdgeData(src_ids, dst_ids, {})
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == ("_V", "_E", "_V")
    assert len(edata.data) == 0
    assert np.array_equal(edata.graph_id, np.full(num_edges, 0))
    # EdgeData with explicit type, data and graph_id
    src_ids = np.random.randint(num_nodes, size=num_edges).astype(float)
    dst_ids = np.random.randint(num_nodes, size=num_edges).astype(float)
    data = {"feat": np.random.rand(num_edges, 3)}
    etype = ("user", "like", "item")
    graph_ids = np.arange(num_edges)
    edata = EdgeData(src_ids, dst_ids, data, type=etype, graph_id=graph_ids)
    assert np.array_equal(edata.src, src_ids)
    assert np.array_equal(edata.dst, dst_ids)
    assert edata.type == etype
    assert len(edata.data) == len(data)
    for k, v in data.items():
        assert k in edata.data
        assert np.array_equal(edata.data[k], v)
    assert np.array_equal(edata.graph_id, graph_ids)
    # EdgeData raises when src, dst, data and graph_id lengths disagree
    expect_except = False
    try:
        EdgeData(
            np.arange(num_edges),
            np.arange(num_edges + 1),
            {"feat": np.random.rand(num_edges - 1, 3)},
            graph_id=np.arange(num_edges + 2),
        )
    except Exception:
        expect_except = True
    assert expect_except

    # GraphData basics
    num_graphs = 10
    graph_ids = np.arange(num_graphs)
    gdata = GraphData(graph_ids, {})
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == 0
    # GraphData with float graph ids and feature data
    graph_ids = np.arange(num_graphs).astype(float)
    data = {"feat": np.random.rand(num_graphs, 3)}
    gdata = GraphData(graph_ids, data)
    assert np.array_equal(gdata.graph_id, graph_ids)
    assert len(gdata.data) == len(data)
    for k, v in data.items():
        assert k in gdata.data
        assert np.array_equal(gdata.data[k], v)


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
@unittest.skipIf(
    dgl.backend.backend_name == "tensorflow", reason="Skip Tensorflow"
)
def test_csvdataset():
    _test_NodeEdgeGraphData()
    _test_construct_graphs_node_ids()
    _test_construct_graphs_homo()
    _test_construct_graphs_hetero()
    _test_construct_graphs_multiple()
    _test_DefaultDataParser()
    _test_load_yaml_with_sanity_check()
    _test_load_node_data_from_csv()
    _test_load_edge_data_from_csv()
    _test_load_graph_data_from_csv()
    _test_CSVDataset_single()
    _test_CSVDataset_multiple()
    _test_CSVDataset_customized_data_parser()
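    # The helpers above are underscore-prefixed so that pytest collects only
    # this aggregate test rather than each piece individually.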


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_nodepred1():
    ds = data.AmazonCoBuyComputerDataset()
    print("train_mask" in ds[0].ndata)
    new_ds = data.AsNodePredDataset(ds, [0.8, 0.1, 0.1], verbose=True)
    assert len(new_ds) == 1
    assert new_ds[0].num_nodes() == ds[0].num_nodes()
    assert new_ds[0].num_edges() == ds[0].num_edges()
    assert "train_mask" in new_ds[0].ndata
    assert F.array_equal(
        new_ds.train_idx, F.nonzero_1d(new_ds[0].ndata["train_mask"])
    )
    assert F.array_equal(
        new_ds.val_idx, F.nonzero_1d(new_ds[0].ndata["val_mask"])
    )
    assert F.array_equal(
        new_ds.test_idx, F.nonzero_1d(new_ds[0].ndata["test_mask"])
    )
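
    # Hedged sanity sketch (assuming the three masks are drawn from a single
    # random permutation and are therefore disjoint):
    train = F.asnumpy(new_ds.train_idx)
    val = F.asnumpy(new_ds.val_idx)
    test = F.asnumpy(new_ds.test_idx)
    assert len(np.intersect1d(train, val)) == 0
    assert len(np.intersect1d(train, test)) == 0
    assert len(np.intersect1d(val, test)) == 0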

    ds = data.AIFBDataset()
    print("train_mask" in ds[0].nodes["Personen"].data)
    new_ds = data.AsNodePredDataset(
        ds, [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert len(new_ds) == 1
    assert new_ds[0].ntypes == ds[0].ntypes
    assert new_ds[0].canonical_etypes == ds[0].canonical_etypes
    assert "train_mask" in new_ds[0].nodes["Personen"].data
    assert F.array_equal(
        new_ds.train_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["train_mask"]),
    )
    assert F.array_equal(
        new_ds.val_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["val_mask"]),
    )
    assert F.array_equal(
        new_ds.test_idx,
        F.nonzero_1d(new_ds[0].nodes["Personen"].data["test_mask"]),
    )


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_nodepred2():
    # test proper reprocessing

    # create
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.8
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.8, 0.1, 0.1]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.8
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(
        data.AmazonCoBuyComputerDataset(), [0.1, 0.1, 0.8]
    )
    assert F.sum(F.astype(ds[0].ndata["train_mask"], F.int32), 0) == int(
        ds[0].num_nodes() * 0.1
    )
    assert len(ds.train_idx) == int(ds[0].num_nodes() * 0.1)

    # create
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.8)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.8)
    # read from cache
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.8, 0.1, 0.1], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.8)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.8)
    # invalid cache, re-read
    ds = data.AsNodePredDataset(
        data.AIFBDataset(), [0.1, 0.1, 0.8], "Personen", verbose=True
    )
    assert F.sum(
        F.astype(ds[0].nodes["Personen"].data["train_mask"], F.int32), 0
    ) == int(ds[0].num_nodes("Personen") * 0.1)
    assert len(ds.train_idx) == int(ds[0].num_nodes("Personen") * 0.1)


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_linkpred():
    # create
    ds = data.AsLinkPredDataset(
        data.CoraGraphDataset(),
        split_ratio=[0.8, 0.1, 0.1],
        neg_ratio=1,
        verbose=True,
    )
    # Cora has 10556 edges; a 10% test split yields 1057 positive edges
    assert ds.test_edges[0][0].shape[0] == 1057
    # the number of negative samples is not guaranteed, so assert within a
    # relaxed range
    assert 1000 <= ds.test_edges[1][0].shape[0] <= 1057
    # read from cache
    ds = data.AsLinkPredDataset(
        data.CoraGraphDataset(),
        split_ratio=[0.7, 0.1, 0.2],
        neg_ratio=2,
        verbose=True,
    )
    # a 20% test split of Cora's 10556 edges yields 2112 positive edges
    assert ds.test_edges[0][0].shape[0] == 2112
    # negative sampling is not guaranteed to hit neg_ratio=2 exactly, so
    # assert within a relaxed range
    assert 4000 < ds.test_edges[1][0].shape[0] <= 4224
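
    # Structure sketch (hedged; relies only on the layout the asserts above
    # already use): test_edges is a (positive, negative) pair, each element a
    # (src, dst) tuple of equal-length ID arrays.
    pos_src, pos_dst = ds.test_edges[0]
    neg_src, neg_dst = ds.test_edges[1]
    assert pos_src.shape[0] == pos_dst.shape[0]
    assert neg_src.shape[0] == neg_dst.shape[0]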


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
@unittest.skipIf(
    dgl.backend.backend_name == "tensorflow", reason="Skip Tensorflow"
)
def test_as_nodepred_csvdataset():
    with tempfile.TemporaryDirectory() as test_dir:
        # generate YAML/CSVs
        meta_yaml_path = os.path.join(test_dir, "meta.yaml")
        edges_csv_path = os.path.join(test_dir, "test_edges.csv")
        nodes_csv_path = os.path.join(test_dir, "test_nodes.csv")
        meta_yaml_data = {
            "version": "1.0.0",
            "dataset_name": "default_name",
            "node_data": [{"file_name": os.path.basename(nodes_csv_path)}],
            "edge_data": [{"file_name": os.path.basename(edges_csv_path)}],
        }
        with open(meta_yaml_path, "w") as f:
            yaml.dump(meta_yaml_data, f, sort_keys=False)
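        # The resulting meta.yaml (field order preserved by sort_keys=False):
        #
        #   version: 1.0.0
        #   dataset_name: default_name
        #   node_data:
        #   - file_name: test_nodes.csv
        #   edge_data:
        #   - file_name: test_edges.csv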
        num_nodes = 100
        num_edges = 500
        num_dims = 3
        num_classes = num_nodes
        feat_ndata = np.random.rand(num_nodes, num_dims)
        label_ndata = np.arange(num_classes)
        df = pd.DataFrame(
            {
                "node_id": np.arange(num_nodes),
                "label": label_ndata,
                "feat": [line.tolist() for line in feat_ndata],
            }
        )
        df.to_csv(nodes_csv_path, index=False)
        df = pd.DataFrame(
            {
                "src_id": np.random.randint(num_nodes, size=num_edges),
                "dst_id": np.random.randint(num_nodes, size=num_edges),
            }
        )
        df.to_csv(edges_csv_path, index=False)

        ds = data.CSVDataset(test_dir, force_reload=True)
        assert "feat" in ds[0].ndata
        assert "label" in ds[0].ndata
        assert "train_mask" not in ds[0].ndata
        assert not hasattr(ds[0], "num_classes")
        new_ds = data.AsNodePredDataset(
            ds, split_ratio=[0.8, 0.1, 0.1], force_reload=True
        )
        assert new_ds.num_classes == num_classes
        assert "feat" in new_ds[0].ndata
        assert "label" in new_ds[0].ndata
        assert "train_mask" in new_ds[0].ndata


@unittest.skipIf(
    F._default_context_str == "gpu",
    reason="Datasets don't need to be tested on GPU.",
)
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Skip MXNet")
def test_as_graphpred_reprocess():
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.GINDataset(name="MUTAG", self_loop=True), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)
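
    # Hedged sanity sketch (assumes AsGraphPredDataset exposes val_idx and
    # test_idx alongside train_idx, and that the three splits are drawn from
    # one permutation): the split indices never overlap.
    all_idx = np.concatenate(
        [
            F.asnumpy(ds.train_idx),
            F.asnumpy(ds.val_idx),
            F.asnumpy(ds.test_idx),
        ]
    )
    assert len(np.unique(all_idx)) == len(all_idx)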

    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.FakeNewsDataset("politifact", "profile"), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.QM7bDataset(), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.QM9Dataset(label_keys=["mu", "gap"]), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.8, 0.1, 0.1]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(
        data.QM9EdgeDataset(label_keys=["mu", "alpha"]), [0.1, 0.1, 0.8]
    )
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.TUDataset("DD"), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.LegacyTUDataset("DD"), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)

    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # read from cache
    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.8, 0.1, 0.1])
    assert len(ds.train_idx) == int(len(ds) * 0.8)
    # invalid cache, re-read
    ds = data.AsGraphPredDataset(data.BA2MotifDataset(), [0.1, 0.1, 0.8])
    assert len(ds.train_idx) == int(len(ds) * 0.1)


if __name__ == "__main__":
    test_minigc()
    test_gin()
    test_data_hash()
    test_tudataset_regression()
    test_fraud()
    test_fakenews()
    test_csvdataset()
    test_as_nodepred1()
    test_as_nodepred2()
    test_as_nodepred_csvdataset()