Unverified commit 5b409bf7 authored by Hongzhi (Steve), Chen, committed by GitHub

Rename_test (#5487)


Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 0dd4f767
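
The commit is a mechanical rename across the test suite: DGL's legacy number_of_nodes() / number_of_edges() calls are replaced with the shorter num_nodes() / num_edges() aliases, and the test logic is otherwise unchanged. Below is a minimal, illustrative sketch of the equivalence (not part of the commit), assuming a recent DGL release where both spellings are still available:

    # Both spellings should report the same counts; number_of_* remains a legacy alias.
    import dgl
    import torch as th

    g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))
    assert g.num_nodes() == g.number_of_nodes() == 4
    assert g.num_edges() == g.number_of_edges() == 3

    # On heterogeneous graphs the same aliases accept a node/edge type argument.
    hg = dgl.heterograph(
        {("user", "plays", "game"): (th.tensor([0, 1]), th.tensor([0, 0]))}
    )
    assert hg.num_nodes("user") == hg.number_of_nodes("user") == 2
    assert hg.num_edges("plays") == hg.number_of_edges("plays") == 1

The hunks below apply this substitution file by file; only the call sites are touched.
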
......@@ -40,7 +40,7 @@ def _assert_is_identical_hetero(g, g2):
# check if node ID spaces and feature spaces are equal
for ntype in g.ntypes:
assert g.number_of_nodes(ntype) == g2.number_of_nodes(ntype)
assert g.num_nodes(ntype) == g2.num_nodes(ntype)
# check if edge ID spaces and feature spaces are equal
for etype in g.canonical_etypes:
......
......@@ -319,7 +319,7 @@ def test_spmv_3d_feat(idtype):
a = sp.random(n, n, p, data_rvs=lambda n: np.ones(n))
g = dgl.DGLGraph(a)
g = g.astype(idtype).to(F.ctx())
m = g.number_of_edges()
m = g.num_edges()
# test#1: v2v with adj data
h = F.randn((n, 5, 5))
......
......@@ -30,8 +30,8 @@ def test_sum_case1(idtype):
@pytest.mark.parametrize("reducer", ["sum", "max", "mean"])
def test_reduce_readout(g, idtype, reducer):
g = g.astype(idtype).to(F.ctx())
g.ndata["h"] = F.randn((g.number_of_nodes(), 3))
g.edata["h"] = F.randn((g.number_of_edges(), 2))
g.ndata["h"] = F.randn((g.num_nodes(), 3))
g.edata["h"] = F.randn((g.num_edges(), 2))
# Test.1: node readout
x = dgl.readout_nodes(g, "h", op=reducer)
......@@ -77,10 +77,10 @@ def test_reduce_readout(g, idtype, reducer):
@pytest.mark.parametrize("reducer", ["sum", "max", "mean"])
def test_weighted_reduce_readout(g, idtype, reducer):
g = g.astype(idtype).to(F.ctx())
g.ndata["h"] = F.randn((g.number_of_nodes(), 3))
g.ndata["w"] = F.randn((g.number_of_nodes(), 1))
g.edata["h"] = F.randn((g.number_of_edges(), 2))
g.edata["w"] = F.randn((g.number_of_edges(), 1))
g.ndata["h"] = F.randn((g.num_nodes(), 3))
g.ndata["w"] = F.randn((g.num_nodes(), 1))
g.edata["h"] = F.randn((g.num_edges(), 2))
g.edata["w"] = F.randn((g.num_edges(), 1))
# Test.1: node readout
x = dgl.readout_nodes(g, "h", "w", op=reducer)
......@@ -126,7 +126,7 @@ def test_weighted_reduce_readout(g, idtype, reducer):
@pytest.mark.parametrize("descending", [True, False])
def test_topk(g, idtype, descending):
g = g.astype(idtype).to(F.ctx())
g.ndata["x"] = F.randn((g.number_of_nodes(), 3))
g.ndata["x"] = F.randn((g.num_nodes(), 3))
# Test.1: to test the case where k > number of nodes.
dgl.topk_nodes(g, "x", 100, sortby=-1)
......@@ -158,7 +158,7 @@ def test_topk(g, idtype, descending):
# Test.3: sorby=None
dgl.topk_nodes(g, "x", k, sortby=None)
g.edata["x"] = F.randn((g.number_of_edges(), 3))
g.edata["x"] = F.randn((g.num_edges(), 3))
# Test.4: topk edges where k > number of edges.
dgl.topk_edges(g, "x", 100, sortby=-1)
......@@ -192,8 +192,8 @@ def test_topk(g, idtype, descending):
@pytest.mark.parametrize("g", get_cases(["homo"], exclude=["dglgraph"]))
def test_softmax(g, idtype):
g = g.astype(idtype).to(F.ctx())
g.ndata["h"] = F.randn((g.number_of_nodes(), 3))
g.edata["h"] = F.randn((g.number_of_edges(), 2))
g.ndata["h"] = F.randn((g.num_nodes(), 3))
g.edata["h"] = F.randn((g.num_edges(), 2))
# Test.1: node readout
x = dgl.softmax_nodes(g, "h")
......@@ -224,7 +224,7 @@ def test_broadcast(idtype, g):
for i, sg in enumerate(subg):
assert F.allclose(
sg.ndata["h"],
F.repeat(F.reshape(gfeat[i], (1, 3)), sg.number_of_nodes(), dim=0),
F.repeat(F.reshape(gfeat[i], (1, 3)), sg.num_nodes(), dim=0),
)
# Test.1: broadcast_edges
......@@ -233,5 +233,5 @@ def test_broadcast(idtype, g):
for i, sg in enumerate(subg):
assert F.allclose(
sg.edata["h"],
F.repeat(F.reshape(gfeat[i], (1, 3)), sg.number_of_edges(), dim=0),
F.repeat(F.reshape(gfeat[i], (1, 3)), sg.num_edges(), dim=0),
)
......@@ -43,15 +43,15 @@ def test_edge_subgraph():
sg.ndata[dgl.NID], F.tensor([0, 2, 4, 5, 1, 9], g.idtype)
)
assert F.array_equal(sg.edata[dgl.EID], F.tensor(eid, g.idtype))
sg.ndata["h"] = F.arange(0, sg.number_of_nodes())
sg.edata["h"] = F.arange(0, sg.number_of_edges())
sg.ndata["h"] = F.arange(0, sg.num_nodes())
sg.edata["h"] = F.arange(0, sg.num_edges())
# relabel=False
sg = g.edge_subgraph(eid, relabel_nodes=False)
assert g.number_of_nodes() == sg.number_of_nodes()
assert g.num_nodes() == sg.num_nodes()
assert F.array_equal(sg.edata[dgl.EID], F.tensor(eid, g.idtype))
sg.ndata["h"] = F.arange(0, sg.number_of_nodes())
sg.edata["h"] = F.arange(0, sg.number_of_edges())
sg.ndata["h"] = F.arange(0, sg.num_nodes())
sg.edata["h"] = F.arange(0, sg.num_edges())
def test_subgraph():
......@@ -192,8 +192,8 @@ def test_subgraph_mask(idtype):
assert F.array_equal(
F.tensor(sg.edges["wishes"].data[dgl.EID]), F.tensor([1], idtype)
)
assert sg.number_of_nodes("developer") == 0
assert sg.number_of_edges("develops") == 0
assert sg.num_nodes("developer") == 0
assert sg.num_edges("develops") == 0
assert F.array_equal(
sg.nodes["user"].data["h"], g.nodes["user"].data["h"][1:3]
)
......@@ -250,8 +250,8 @@ def test_subgraph1(idtype):
assert F.array_equal(
F.tensor(sg.edges["wishes"].data[dgl.EID]), F.tensor([1], g.idtype)
)
assert sg.number_of_nodes("developer") == 0
assert sg.number_of_edges("develops") == 0
assert sg.num_nodes("developer") == 0
assert sg.num_edges("develops") == 0
assert F.array_equal(
sg.nodes["user"].data["h"], g.nodes["user"].data["h"][1:3]
)
......@@ -307,7 +307,7 @@ def test_subgraph1(idtype):
)
else:
for ntype in sg.ntypes:
assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
assert g.num_nodes(ntype) == sg.num_nodes(ntype)
assert F.array_equal(
F.tensor(sg.edges["follows"].data[dgl.EID]), F.tensor([1], g.idtype)
......@@ -337,7 +337,7 @@ def test_subgraph1(idtype):
)
else:
for ntype in sg.ntypes:
assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
assert g.num_nodes(ntype) == sg.num_nodes(ntype)
assert F.array_equal(
F.tensor(sg.edges["plays"].data[dgl.EID]),
......@@ -361,7 +361,7 @@ def test_subgraph1(idtype):
assert set(sg.ntypes) == {"user", "game"}
assert set(sg.etypes) == {"follows", "plays", "wishes"}
for ntype in sg.ntypes:
assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
assert sg.num_nodes(ntype) == g.num_nodes(ntype)
for etype in sg.etypes:
src_sg, dst_sg = sg.all_edges(etype=etype, order="eid")
src_g, dst_g = g.all_edges(etype=etype, order="eid")
......@@ -390,7 +390,7 @@ def test_subgraph1(idtype):
assert set(sg.ntypes) == {"developer", "game"}
assert set(sg.etypes) == {"develops"}
for ntype in sg.ntypes:
assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
assert sg.num_nodes(ntype) == g.num_nodes(ntype)
for etype in sg.etypes:
src_sg, dst_sg = sg.all_edges(etype=etype, order="eid")
src_g, dst_g = g.all_edges(etype=etype, order="eid")
......@@ -454,7 +454,7 @@ def test_in_subgraph(idtype):
hg["liked-by"].edge_ids(u, v), subg["liked-by"].edata[dgl.EID]
)
assert edge_set == {(2, 0), (2, 1), (1, 0), (0, 0)}
assert subg["flips"].number_of_edges() == 0
assert subg["flips"].num_edges() == 0
for ntype in subg.ntypes:
assert dgl.NID not in subg.nodes[ntype].data
......
......@@ -73,7 +73,7 @@ def test_topological_nodes(idtype, n=100):
adjmat = g.adjacency_matrix(transpose=True)
def tensor_topo_traverse():
n = g.number_of_nodes()
n = g.num_nodes()
mask = F.copy_to(F.ones((n, 1)), F.cpu())
degree = F.spmm(adjmat, mask)
while F.reduce_sum(mask) != 0.0:
......
......@@ -65,7 +65,7 @@ def check_sort(spm, tag_arr=None, tag_pos=None):
def test_sort_with_tag(idtype):
num_nodes, num_adj, num_tags = 200, [20, 50], 5
g = create_test_heterograph(num_nodes, num_adj, idtype=idtype)
tag = F.tensor(np.random.choice(num_tags, g.number_of_nodes()))
tag = F.tensor(np.random.choice(num_tags, g.num_nodes()))
src, dst = g.edges()
edge_tag_dst = F.gather_row(tag, F.tensor(dst))
edge_tag_src = F.gather_row(tag, F.tensor(src))
......@@ -99,8 +99,8 @@ def test_sort_with_tag_bipartite(idtype):
num_nodes, num_adj, num_tags = 200, [20, 50], 5
g = create_test_heterograph(num_nodes, num_adj, idtype=idtype)
g = dgl.heterograph({("_U", "_E", "_V"): g.edges()})
utag = F.tensor(np.random.choice(num_tags, g.number_of_nodes("_U")))
vtag = F.tensor(np.random.choice(num_tags, g.number_of_nodes("_V")))
utag = F.tensor(np.random.choice(num_tags, g.num_nodes("_U")))
vtag = F.tensor(np.random.choice(num_tags, g.num_nodes("_V")))
new_g = dgl.sort_csr_by_tag(g, vtag)
old_csr = g.adjacency_matrix(scipy_fmt="csr")
......
......@@ -26,7 +26,7 @@ def test_to_block(idtype):
def check(g, bg, ntype, etype, dst_nodes, include_dst_in_src=True):
if dst_nodes is not None:
assert F.array_equal(bg.dstnodes[ntype].data[dgl.NID], dst_nodes)
n_dst_nodes = bg.number_of_nodes("DST/" + ntype)
n_dst_nodes = bg.num_nodes("DST/" + ntype)
if include_dst_in_src:
assert F.array_equal(
bg.srcnodes[ntype].data[dgl.NID][:n_dst_nodes],
......@@ -136,21 +136,21 @@ def test_to_block(idtype):
bg = dgl.to_block(g_ab)
assert bg.idtype == idtype
assert bg.number_of_nodes("SRC/B") == 4
assert bg.num_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
assert bg.num_nodes("DST/A") == 0
checkall(g_ab, bg, None)
check_features(g_ab, bg)
dst_nodes = {"B": F.tensor([5, 6, 3, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes)
assert bg.number_of_nodes("SRC/B") == 4
assert bg.num_nodes("SRC/B") == 4
assert F.array_equal(
bg.srcnodes["B"].data[dgl.NID], bg.dstnodes["B"].data[dgl.NID]
)
assert bg.number_of_nodes("DST/A") == 0
assert bg.num_nodes("DST/A") == 0
checkall(g, bg, dst_nodes)
check_features(g, bg)
......
......@@ -198,7 +198,7 @@ def test_gat_conv(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
# test residual connection
gat = nn.GATConv(10, out_dim, num_heads, residual=True)
......@@ -222,7 +222,7 @@ def test_gat_conv_bi(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -291,7 +291,7 @@ def test_gg_conv():
# test#1: basic
h0 = F.randn((20, 10))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
etypes = nd.random.randint(0, 4, g.num_edges()).as_in_context(ctx)
h1 = gg_conv(g, h0, etypes)
assert h1.shape == (20, 20)
......@@ -421,7 +421,7 @@ def test_dense_sage_conv(idtype, g, out_dim):
F.randn((g.number_of_dst_nodes(), 5)),
)
else:
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
out_sage = sage(g, feat)
out_dense_sage = dense_sage(adj, feat)
......@@ -508,7 +508,7 @@ def test_gmm_conv(g, idtype):
gmm_conv = nn.GMMConv(5, 2, 5, 3, "max")
gmm_conv.initialize(ctx=ctx)
h0 = F.randn((g.number_of_src_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 5))
pseudo = F.randn((g.num_edges(), 5))
h1 = gmm_conv(g, h0, pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
......@@ -523,7 +523,7 @@ def test_gmm_conv_bi(g, idtype):
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
pseudo = F.randn((g.number_of_edges(), 5))
pseudo = F.randn((g.num_edges(), 5))
h1 = gmm_conv(g, (h0, hd), pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
......@@ -537,7 +537,7 @@ def test_nn_conv(g, idtype):
nn_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
etypes = nd.random.randint(0, 4, g.num_edges()).as_in_context(ctx)
h1 = nn_conv(g, h0, etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
......@@ -552,7 +552,7 @@ def test_nn_conv_bi(g, idtype):
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
etypes = nd.random.randint(0, 4, g.num_edges()).as_in_context(ctx)
h1 = nn_conv(g, (h0, hd), etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
......@@ -568,9 +568,9 @@ def test_sg_conv(out_dim):
print(sgc)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = sgc(g, h0)
assert h1.shape == (g.number_of_nodes(), out_dim)
assert h1.shape == (g.num_nodes(), out_dim)
def test_set2set():
......@@ -582,13 +582,13 @@ def test_set2set():
print(s2s)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = s2s(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = s2s(bg, h0)
assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.ndim == 2
......@@ -601,13 +601,13 @@ def test_glob_att_pool():
gap.initialize(ctx=ctx)
print(gap)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
......@@ -622,7 +622,7 @@ def test_simple_pool():
print(sum_pool, avg_pool, max_pool, sort_pool)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = sum_pool(g, h0)
check_close(F.squeeze(h1, 0), F.sum(h0, 0))
h1 = avg_pool(g, h0)
......@@ -635,7 +635,7 @@ def test_simple_pool():
# test#2: batched graph
g_ = dgl.from_networkx(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = mx.nd.stack(
F.sum(h0[:15], 0),
......@@ -680,7 +680,7 @@ def test_rgcn(O):
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)).to(F.ctx())
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
for i in range(g.num_edges()):
etype.append(i % 5)
B = 2
I = 10
......@@ -701,7 +701,7 @@ def test_rgcn(O):
assert list(h_new.shape) == [100, O]
# with norm
norm = nd.zeros((g.number_of_edges(), 1), ctx=ctx)
norm = nd.zeros((g.num_edges(), 1), ctx=ctx)
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
......@@ -768,7 +768,7 @@ def test_sequential():
graph.ndata["h"] = n_feat
graph.update_all(fn.copy_u("h", "m"), fn.sum("m", "h"))
n_feat += graph.ndata["h"]
return n_feat.reshape(graph.number_of_nodes() // 2, 2, -1).sum(1)
return n_feat.reshape(graph.num_nodes() // 2, 2, -1).sum(1)
g1 = dgl.from_networkx(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
g2 = dgl.from_networkx(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
......
......@@ -96,7 +96,7 @@ def run_client(graph_name, cli_id, part_id, server_count):
)
g = DistGraph(graph_name, gpb=gpb)
policy = dgl.distributed.PartitionPolicy("node", g.get_partition_book())
num_nodes = g.number_of_nodes()
num_nodes = g.num_nodes()
emb_dim = 4
dgl_emb = DistEmbedding(
num_nodes,
......
......@@ -267,7 +267,7 @@ def test_set2set():
print(s2s)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = s2s(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2
......@@ -275,7 +275,7 @@ def test_set2set():
g1 = dgl.DGLGraph(nx.path_graph(11)).to(F.ctx())
g2 = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g1, g2])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = s2s(bg, h0)
assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.dim() == 2
......@@ -293,13 +293,13 @@ def test_glob_att_pool():
th.save(gap, tmp_buffer)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2
# test#2: batched graph
bg = dgl.batch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2
......@@ -316,7 +316,7 @@ def test_simple_pool():
print(sum_pool, avg_pool, max_pool, sort_pool)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
sum_pool = sum_pool.to(ctx)
avg_pool = avg_pool.to(ctx)
max_pool = max_pool.to(ctx)
......@@ -333,7 +333,7 @@ def test_simple_pool():
# test#2: batched graph
g_ = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = th.stack(
[
......@@ -390,7 +390,7 @@ def test_set_trans():
print(st_enc_0, st_enc_1, st_dec)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 50))
h0 = F.randn((g.num_nodes(), 50))
h1 = st_enc_0(g, h0)
assert h1.shape == h0.shape
h1 = st_enc_1(g, h0)
......@@ -402,7 +402,7 @@ def test_set_trans():
g1 = dgl.DGLGraph(nx.path_graph(5))
g2 = dgl.DGLGraph(nx.path_graph(10))
bg = dgl.batch([g, g1, g2])
h0 = F.randn((bg.number_of_nodes(), 50))
h0 = F.randn((bg.num_nodes(), 50))
h1 = st_enc_0(bg, h0)
assert h1.shape == h0.shape
h1 = st_enc_1(bg, h0)
......@@ -421,14 +421,14 @@ def test_rgcn(idtype, O):
g = g.astype(idtype).to(F.ctx())
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
for i in range(g.num_edges()):
etype.append(i % 5)
B = 2
I = 10
h = th.randn((100, I)).to(ctx)
r = th.tensor(etype).to(ctx)
norm = th.rand((g.number_of_edges(), 1)).to(ctx)
norm = th.rand((g.num_edges(), 1)).to(ctx)
sorted_r, idx = th.sort(r)
sorted_g = dgl.reorder_graph(
g,
......@@ -482,13 +482,13 @@ def test_rgcn_default_nbasis(idtype, O):
g = g.astype(idtype).to(F.ctx())
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
for i in range(g.num_edges()):
etype.append(i % 5)
I = 10
h = th.randn((100, I)).to(ctx)
r = th.tensor(etype).to(ctx)
norm = th.rand((g.number_of_edges(), 1)).to(ctx)
norm = th.rand((g.num_edges(), 1)).to(ctx)
sorted_r, idx = th.sort(r)
sorted_g = dgl.reorder_graph(
g,
......@@ -552,7 +552,7 @@ def test_gat_conv(g, idtype, out_dim, num_heads):
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
# test residual connection
gat = nn.GATConv(5, out_dim, num_heads, residual=True)
......@@ -576,7 +576,7 @@ def test_gat_conv_bi(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -598,7 +598,7 @@ def test_gatv2_conv(g, idtype, out_dim, num_heads):
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
# test residual connection
gat = nn.GATConv(5, out_dim, num_heads, residual=True)
......@@ -622,7 +622,7 @@ def test_gatv2_conv_bi(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -640,17 +640,17 @@ def test_egat_conv(g, idtype, out_node_feats, out_edge_feats, num_heads):
out_edge_feats=out_edge_feats,
num_heads=num_heads,
)
nfeat = F.randn((g.number_of_nodes(), 10))
efeat = F.randn((g.number_of_edges(), 5))
nfeat = F.randn((g.num_nodes(), 10))
efeat = F.randn((g.num_edges(), 5))
egat = egat.to(ctx)
h, f = egat(g, nfeat, efeat)
th.save(egat, tmp_buffer)
assert h.shape == (g.number_of_nodes(), num_heads, out_node_feats)
assert f.shape == (g.number_of_edges(), num_heads, out_edge_feats)
assert h.shape == (g.num_nodes(), num_heads, out_node_feats)
assert f.shape == (g.num_edges(), num_heads, out_edge_feats)
_, _, attn = egat(g, nfeat, efeat, True)
assert attn.shape == (g.number_of_edges(), num_heads, 1)
assert attn.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -672,16 +672,16 @@ def test_egat_conv_bi(g, idtype, out_node_feats, out_edge_feats, num_heads):
F.randn((g.number_of_src_nodes(), 10)),
F.randn((g.number_of_dst_nodes(), 15)),
)
efeat = F.randn((g.number_of_edges(), 7))
efeat = F.randn((g.num_edges(), 7))
egat = egat.to(ctx)
h, f = egat(g, nfeat, efeat)
th.save(egat, tmp_buffer)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_node_feats)
assert f.shape == (g.number_of_edges(), num_heads, out_edge_feats)
assert f.shape == (g.num_edges(), num_heads, out_edge_feats)
_, _, attn = egat(g, nfeat, efeat, True)
assert attn.shape == (g.number_of_edges(), num_heads, 1)
assert attn.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -751,7 +751,7 @@ def test_sgc_conv(g, idtype, out_dim):
# test pickle
th.save(sgc, tmp_buffer)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
sgc = sgc.to(ctx)
h = sgc(g, feat)
......@@ -772,7 +772,7 @@ def test_appnp_conv(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
appnp = nn.APPNPConv(10, 0.1)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
appnp = appnp.to(ctx)
# test pickle
......@@ -788,7 +788,7 @@ def test_appnp_conv_e_weight(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
appnp = nn.APPNPConv(10, 0.1)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
eweight = F.ones((g.num_edges(),))
appnp = appnp.to(ctx)
......@@ -805,7 +805,7 @@ def test_gcn2conv_e_weight(g, idtype, bias):
gcn2conv = nn.GCN2Conv(
5, layer=2, alpha=0.5, bias=bias, project_initial_features=True
)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
eweight = F.ones((g.num_edges(),))
gcn2conv = gcn2conv.to(ctx)
res = feat
......@@ -819,7 +819,7 @@ def test_sgconv_e_weight(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
sgconv = nn.SGConv(5, 5, 3)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
eweight = F.ones((g.num_edges(),))
sgconv = sgconv.to(ctx)
h = sgconv(g, feat, edge_weight=eweight)
......@@ -833,7 +833,7 @@ def test_tagconv_e_weight(g, idtype):
g = g.astype(idtype).to(ctx)
conv = nn.TAGConv(5, 5, bias=True)
conv = conv.to(ctx)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
eweight = F.ones((g.num_edges(),))
conv = conv.to(ctx)
h = conv(g, feat, edge_weight=eweight)
......@@ -938,8 +938,8 @@ def test_gated_graph_conv(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
ggconv = nn.GatedGraphConv(5, 10, 5, 3)
etypes = th.arange(g.number_of_edges()) % 3
feat = F.randn((g.number_of_nodes(), 5))
etypes = th.arange(g.num_edges()) % 3
feat = F.randn((g.num_nodes(), 5))
ggconv = ggconv.to(ctx)
etypes = etypes.to(ctx)
......@@ -954,8 +954,8 @@ def test_gated_graph_conv_one_etype(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
ggconv = nn.GatedGraphConv(5, 10, 5, 1)
etypes = th.zeros(g.number_of_edges())
feat = F.randn((g.number_of_nodes(), 5))
etypes = th.zeros(g.num_edges())
feat = F.randn((g.num_nodes(), 5))
ggconv = ggconv.to(ctx)
etypes = etypes.to(ctx)
......@@ -976,7 +976,7 @@ def test_nn_conv(g, idtype):
edge_func = th.nn.Linear(4, 5 * 10)
nnconv = nn.NNConv(5, 10, edge_func, "mean")
feat = F.randn((g.number_of_src_nodes(), 5))
efeat = F.randn((g.number_of_edges(), 4))
efeat = F.randn((g.num_edges(), 4))
nnconv = nnconv.to(ctx)
h = nnconv(g, feat, efeat)
# currently we only do shape check
......@@ -992,7 +992,7 @@ def test_nn_conv_bi(g, idtype):
nnconv = nn.NNConv((5, 2), 10, edge_func, "mean")
feat = F.randn((g.number_of_src_nodes(), 5))
feat_dst = F.randn((g.number_of_dst_nodes(), 2))
efeat = F.randn((g.number_of_edges(), 4))
efeat = F.randn((g.num_edges(), 4))
nnconv = nnconv.to(ctx)
h = nnconv(g, (feat, feat_dst), efeat)
# currently we only do shape check
......@@ -1005,8 +1005,8 @@ def test_gmm_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gmmconv = nn.GMMConv(5, 10, 3, 4, "mean")
feat = F.randn((g.number_of_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 3))
feat = F.randn((g.num_nodes(), 5))
pseudo = F.randn((g.num_edges(), 3))
gmmconv = gmmconv.to(ctx)
h = gmmconv(g, feat, pseudo)
# currently we only do shape check
......@@ -1023,7 +1023,7 @@ def test_gmm_conv_bi(g, idtype):
gmmconv = nn.GMMConv((5, 2), 10, 3, 4, "mean")
feat = F.randn((g.number_of_src_nodes(), 5))
feat_dst = F.randn((g.number_of_dst_nodes(), 2))
pseudo = F.randn((g.number_of_edges(), 3))
pseudo = F.randn((g.num_edges(), 3))
gmmconv = gmmconv.to(ctx)
h = gmmconv(g, (feat, feat_dst), pseudo)
# currently we only do shape check
......@@ -1070,7 +1070,7 @@ def test_dense_sage_conv(g, idtype, out_dim):
F.randn((g.number_of_dst_nodes(), 5)),
)
else:
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
sage = sage.to(ctx)
dense_sage = dense_sage.to(ctx)
out_sage = sage(g, feat)
......@@ -1130,7 +1130,7 @@ def test_dotgat_conv(g, idtype, out_dim, num_heads):
h = dotgat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = dotgat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -1149,7 +1149,7 @@ def test_dotgat_conv_bi(g, idtype, out_dim, num_heads):
h = dotgat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = dotgat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@pytest.mark.parametrize("out_dim", [1, 2])
......@@ -1216,7 +1216,7 @@ def test_sequential():
graph.ndata["h"] = n_feat
graph.update_all(fn.copy_u("h", "m"), fn.sum("m", "h"))
n_feat += graph.ndata["h"]
return n_feat.view(graph.number_of_nodes() // 2, 2, -1).sum(1)
return n_feat.view(graph.num_nodes() // 2, 2, -1).sum(1)
g1 = dgl.DGLGraph(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
g2 = dgl.DGLGraph(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
......@@ -1243,8 +1243,8 @@ def test_atomic_conv(g, idtype):
if F.gpu_ctx():
aconv = aconv.to(ctx)
feat = F.randn((g.number_of_nodes(), 1))
dist = F.randn((g.number_of_edges(), 1))
feat = F.randn((g.num_nodes(), 1))
dist = F.randn((g.num_edges(), 1))
h = aconv(g, feat, dist)
......@@ -1268,7 +1268,7 @@ def test_cf_conv(g, idtype, out_dim):
cfconv = cfconv.to(ctx)
src_feats = F.randn((g.number_of_src_nodes(), 2))
edge_feats = F.randn((g.number_of_edges(), 3))
edge_feats = F.randn((g.num_edges(), 3))
h = cfconv(g, src_feats, edge_feats)
# current we only do shape check
assert h.shape[-1] == out_dim
......
......@@ -141,7 +141,7 @@ def test_simple_pool():
print(sum_pool, avg_pool, max_pool, sort_pool)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = sum_pool(g, h0)
assert F.allclose(F.squeeze(h1, 0), F.sum(h0, 0))
h1 = avg_pool(g, h0)
......@@ -154,7 +154,7 @@ def test_simple_pool():
# test#2: batched graph
g_ = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = tf.stack(
[
......@@ -205,13 +205,13 @@ def test_glob_att_pool():
print(gap)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h0 = F.randn((g.num_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h0 = F.randn((bg.num_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
......@@ -224,7 +224,7 @@ def test_rgcn(O):
)
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
for i in range(g.num_edges()):
etype.append(i % 5)
B = 2
I = 10
......@@ -256,7 +256,7 @@ def test_rgcn(O):
assert F.allclose(h_new, h_new_low)
# with norm
norm = tf.zeros((g.number_of_edges(), 1))
norm = tf.zeros((g.num_edges(), 1))
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True)
......@@ -313,7 +313,7 @@ def test_gat_conv(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
# test residual connection
gat = nn.GATConv(5, out_dim, num_heads, residual=True)
......@@ -335,7 +335,7 @@ def test_gat_conv_bi(g, idtype, out_dim, num_heads):
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
assert a.shape == (g.num_edges(), num_heads, 1)
@parametrize_idtype
......@@ -397,7 +397,7 @@ def test_sgc_conv(g, idtype, out_dim):
g = g.astype(idtype).to(ctx)
# not cached
sgc = nn.SGConv(5, out_dim, 3)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
h = sgc(g, feat)
assert h.shape[-1] == out_dim
......@@ -416,7 +416,7 @@ def test_appnp_conv(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
appnp = nn.APPNPConv(10, 0.1)
feat = F.randn((g.number_of_nodes(), 5))
feat = F.randn((g.num_nodes(), 5))
h = appnp(g, feat)
assert h.shape[-1] == 5
......
......@@ -119,10 +119,10 @@ def test_parmetis_postprocessing():
num_chunks = 2
g = create_chunked_dataset(root_dir, num_chunks)
num_nodes = g.number_of_nodes()
num_institutions = g.number_of_nodes("institution")
num_authors = g.number_of_nodes("author")
num_papers = g.number_of_nodes("paper")
num_nodes = g.num_nodes()
num_institutions = g.num_nodes("institution")
num_authors = g.num_nodes("author")
num_papers = g.num_nodes("paper")
# Generate random parmetis partition ids for the nodes in the graph.
# Replace this code with actual ParMETIS executable when it is ready
......@@ -192,9 +192,9 @@ def test_parmetis_wrapper():
all_ntypes = g.ntypes
all_etypes = g.etypes
num_constraints = len(all_ntypes) + 3
num_institutions = g.number_of_nodes("institution")
num_authors = g.number_of_nodes("author")
num_papers = g.number_of_nodes("paper")
num_institutions = g.num_nodes("institution")
num_authors = g.num_nodes("author")
num_papers = g.num_nodes("paper")
# Trigger ParMETIS.
schema_file = os.path.join(root_dir, "chunked-data/metadata.json")
......@@ -211,8 +211,8 @@ def test_parmetis_wrapper():
f.write("127.0.0.1\n")
f.write("127.0.0.1\n")
num_nodes = g.number_of_nodes()
num_edges = g.number_of_edges()
num_nodes = g.num_nodes()
num_edges = g.num_edges()
stats_file = f"{graph_name}_stats.txt"
with open(stats_file, "w") as f:
f.write(f"{num_nodes} {num_edges} {num_constraints}")
......
......@@ -21,7 +21,7 @@ def check_fail(fn, *args, **kwargs):
def assert_is_identical(g, g2):
assert g.number_of_nodes() == g2.number_of_nodes()
assert g.num_nodes() == g2.num_nodes()
src, dst = g.all_edges(order="eid")
src2, dst2 = g2.all_edges(order="eid")
assert F.array_equal(src, src2)
......@@ -45,7 +45,7 @@ def assert_is_identical_hetero(g, g2, ignore_internal_data=False):
# check if node ID spaces and feature spaces are equal
for ntype in g.ntypes:
assert g.number_of_nodes(ntype) == g2.number_of_nodes(ntype)
assert g.num_nodes(ntype) == g2.num_nodes(ntype)
if ignore_internal_data:
for k in list(g.nodes[ntype].data.keys()):
if is_internal_column(k):
......@@ -91,10 +91,10 @@ def check_graph_equal(g1, g2, *, check_idtype=True, check_feature=True):
assert g2.metagraph().edges(keys=True)[edges] == features
for nty in g1.ntypes:
assert g1.number_of_nodes(nty) == g2.number_of_nodes(nty)
assert g1.num_nodes(nty) == g2.num_nodes(nty)
assert F.allclose(g1.batch_num_nodes(nty), g2.batch_num_nodes(nty))
for ety in g1.canonical_etypes:
assert g1.number_of_edges(ety) == g2.number_of_edges(ety)
assert g1.num_edges(ety) == g2.num_edges(ety)
assert F.allclose(g1.batch_num_edges(ety), g2.batch_num_edges(ety))
src1, dst1, eid1 = g1.edges(etype=ety, form="all")
src2, dst2, eid2 = g2.edges(etype=ety, form="all")
......@@ -109,14 +109,14 @@ def check_graph_equal(g1, g2, *, check_idtype=True, check_feature=True):
if check_feature:
for nty in g1.ntypes:
if g1.number_of_nodes(nty) == 0:
if g1.num_nodes(nty) == 0:
continue
for feat_name in g1.nodes[nty].data.keys():
assert F.allclose(
g1.nodes[nty].data[feat_name], g2.nodes[nty].data[feat_name]
)
for ety in g1.canonical_etypes:
if g1.number_of_edges(ety) == 0:
if g1.num_edges(ety) == 0:
continue
for feat_name in g2.edges[ety].data.keys():
assert F.allclose(
......
......@@ -75,8 +75,8 @@ def graph1():
),
device=F.cpu(),
)
g.ndata["h"] = F.copy_to(F.randn((g.number_of_nodes(), 2)), F.cpu())
g.edata["w"] = F.copy_to(F.randn((g.number_of_edges(), 3)), F.cpu())
g.ndata["h"] = F.copy_to(F.randn((g.num_nodes(), 2)), F.cpu())
g.edata["w"] = F.copy_to(F.randn((g.num_edges(), 3)), F.cpu())
return g
......@@ -89,10 +89,8 @@ def graph1():
),
device=F.cpu(),
)
g.ndata["h"] = F.copy_to(F.randn((g.number_of_nodes(), 2)), F.cpu())
g.edata["scalar_w"] = F.copy_to(
F.abs(F.randn((g.number_of_edges(),))), F.cpu()
)
g.ndata["h"] = F.copy_to(F.randn((g.num_nodes(), 2)), F.cpu())
g.edata["scalar_w"] = F.copy_to(F.abs(F.randn((g.num_edges(),))), F.cpu())
return g
......@@ -129,19 +127,19 @@ def heterograph0():
device=F.cpu(),
)
g.nodes["user"].data["h"] = F.copy_to(
F.randn((g.number_of_nodes("user"), 3)), F.cpu()
F.randn((g.num_nodes("user"), 3)), F.cpu()
)
g.nodes["game"].data["h"] = F.copy_to(
F.randn((g.number_of_nodes("game"), 2)), F.cpu()
F.randn((g.num_nodes("game"), 2)), F.cpu()
)
g.nodes["developer"].data["h"] = F.copy_to(
F.randn((g.number_of_nodes("developer"), 3)), F.cpu()
F.randn((g.num_nodes("developer"), 3)), F.cpu()
)
g.edges["plays"].data["h"] = F.copy_to(
F.randn((g.number_of_edges("plays"), 1)), F.cpu()
F.randn((g.num_edges("plays"), 1)), F.cpu()
)
g.edges["develops"].data["h"] = F.copy_to(
F.randn((g.number_of_edges("develops"), 5)), F.cpu()
F.randn((g.num_edges("develops"), 5)), F.cpu()
)
return g
......