import backend as F
import pytest
import torch

import dgl


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test."
)
def test_pin_noncontiguous():
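    # pin_memory_inplace requires a contiguous tensor, so pinning a
    # transposed (non-contiguous) tensor should raise DGLError.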
    t = torch.empty([10, 100]).transpose(0, 1)

    assert not t.is_contiguous()
    assert not F.is_pinned(t)

    with pytest.raises(dgl.DGLError):
        dgl.utils.pin_memory_inplace(t)


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test."
)
def test_pin_view():
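    # A slice such as t[10:20] is contiguous but only a view of t's
    # storage, so pinning it in place should also raise DGLError.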
    t = torch.empty([100, 10])
    v = t[10:20]

    assert v.is_contiguous()
    assert not F.is_pinned(t)

    with pytest.raises(dgl.DGLError):
        dgl.utils.pin_memory_inplace(v)


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test."
)
def test_unpin_automatically():
    # Run enough iterations that the memory pool gets reused.
    for j in range(10):
        t = torch.ones(10000, 10)
        assert not F.is_pinned(t)
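        # pin_memory_inplace returns a dgl.ndarray handle wrapping t's
        # memory; t stays pinned only while this handle is alive.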
        nd = dgl.utils.pin_memory_inplace(t)
        assert F.is_pinned(t)
        del nd
        # dgl.ndarray will unpin its data upon destruction
        assert not F.is_pinned(t)
        del t


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test."
)
def test_pin_unpin_column():
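    # pin_memory_() pins the graph structure and its feature tensors in
    # place; replacing a feature column afterwards should unpin only the
    # old column while the graph itself stays pinned.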
    g = dgl.graph(([1, 2, 3, 4], [0, 0, 0, 0]))

    g.ndata["x"] = torch.randn(g.num_nodes())
    g.pin_memory_()
    assert g.is_pinned()
    assert g.ndata["x"].is_pinned()
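    # Check DGL's internal bookkeeping: each pinned column records that
    # DGL pinned it and holds the backing ndarray in _data_nd.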
    for col in g._node_frames[0].values():
        assert col.pinned_by_dgl
        assert col._data_nd is not None

    g.ndata["x"] = torch.randn(g.num_nodes())  # unpin the old ndata['x']
    assert g.is_pinned()
    for col in g._node_frames[0].values():
        assert not col.pinned_by_dgl
        assert col._data_nd is None
    assert not g.ndata["x"].is_pinned()


@pytest.mark.skipif(
    F._default_context_str == "cpu", reason="Need gpu for this test."
)
def test_pin_empty():
    t = torch.tensor([])
    assert not t.is_pinned()

    # Pinning or unpinning an empty tensor is a no-op, matching PyTorch's
    # default behavior; we only check that it does not raise an error.
    nd = dgl.utils.pin_memory_inplace(t)
    assert not t.is_pinned()


if __name__ == "__main__":
    test_pin_noncontiguous()
    test_pin_view()
    test_unpin_automatically()
    test_pin_unpin_column()
    test_pin_empty()