"git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "f96b7606582bd6ca1a779d7c346083f578352ac7"
Unverified Commit 89b321b8 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes. (#5488)



* nn-only

* data-only

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 7f5da697
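
Context for reviewers: this change only switches call sites from the long-form graph accessors to their shorter aliases; it does not alter behavior. A minimal sketch of the usage before and after the rename follows (the tiny example graph is made up for illustration, and it assumes both spellings remain available as aliases in DGL):

    import dgl
    import torch as th

    # Hypothetical toy graph: 3 nodes, 2 directed edges (0 -> 1, 1 -> 2).
    g = dgl.graph((th.tensor([0, 1]), th.tensor([1, 2])), num_nodes=3)

    # Short-form accessors adopted throughout this commit.
    print(g.num_nodes())  # 3
    print(g.num_edges())  # 2

    # Long-form names being replaced; at the time of this change they are
    # equivalent aliases, so both calls return the same counts.
    assert g.number_of_nodes() == g.num_nodes()
    assert g.number_of_edges() == g.num_edges()
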
@@ -191,8 +191,8 @@ class CitationGraphDataset(DGLBuiltinDataset):
         if self.verbose:
             print("Finished data loading and preprocessing.")
-            print(" NumNodes: {}".format(self._g.number_of_nodes()))
-            print(" NumEdges: {}".format(self._g.number_of_edges()))
+            print(" NumNodes: {}".format(self._g.num_nodes()))
+            print(" NumEdges: {}".format(self._g.num_edges()))
             print(" NumFeats: {}".format(self._g.ndata["feat"].shape[1]))
             print(" NumClasses: {}".format(self.num_classes))
             print(
@@ -256,8 +256,8 @@ class CitationGraphDataset(DGLBuiltinDataset):
         # hack for mxnet compatability
         if self.verbose:
-            print(" NumNodes: {}".format(self._g.number_of_nodes()))
-            print(" NumEdges: {}".format(self._g.number_of_edges()))
+            print(" NumNodes: {}".format(self._g.num_nodes()))
+            print(" NumEdges: {}".format(self._g.num_edges()))
             print(" NumFeats: {}".format(self._g.ndata["feat"].shape[1]))
             print(" NumClasses: {}".format(self.num_classes))
             print(
...
@@ -261,7 +261,7 @@ class GINDataset(DGLBuiltinDataset):
             if len(self.nlabel_dict) > 1:
                 self.nlabels_flag = True
-            assert g.number_of_nodes() == n_nodes
+            assert g.num_nodes() == n_nodes
             # update statistics of graphs
             self.n += n_nodes
@@ -308,9 +308,9 @@ class GINDataset(DGLBuiltinDataset):
             label2idx = {nlabel_set[i]: i for i in range(len(nlabel_set))}
             # generate node attr by node label
             for g in self.graphs:
-                attr = np.zeros((g.number_of_nodes(), len(label2idx)))
+                attr = np.zeros((g.num_nodes(), len(label2idx)))
                 attr[
-                    range(g.number_of_nodes()),
+                    range(g.num_nodes()),
                     [
                         label2idx[nl]
                         for nl in F.asnumpy(g.ndata["label"]).tolist()
...
@@ -93,8 +93,8 @@ class GNNBenchmarkDataset(DGLBuiltinDataset):
    def _print_info(self):
        if self.verbose:
-            print(" NumNodes: {}".format(self._graph.number_of_nodes()))
-            print(" NumEdges: {}".format(self._graph.number_of_edges()))
+            print(" NumNodes: {}".format(self._graph.num_nodes()))
+            print(" NumEdges: {}".format(self._graph.num_edges()))
            print(" NumFeats: {}".format(self._graph.ndata["feat"].shape[-1]))
            print(" NumbClasses: {}".format(self.num_classes))
...
@@ -67,13 +67,13 @@ class HeteroGraphData(ObjectBase):
                ntensor[i]: F.zerocopy_from_dgl_ndarray(ntensor[i + 1])
                for i in range(0, len(ntensor), 2)
            }
-            nframes.append(Frame(ndict, num_rows=gidx.number_of_nodes(ntid)))
+            nframes.append(Frame(ndict, num_rows=gidx.num_nodes(ntid)))
        for etid, etensor in enumerate(etensor_list):
            edict = {
                etensor[i]: F.zerocopy_from_dgl_ndarray(etensor[i + 1])
                for i in range(0, len(etensor), 2)
            }
-            eframes.append(Frame(edict, num_rows=gidx.number_of_edges(etid)))
+            eframes.append(Frame(edict, num_rows=gidx.num_edges(etid)))
        return DGLGraph(gidx, ntype_names, etype_names, nframes, eframes)
@@ -461,8 +461,8 @@ class FB15k237Dataset(KnowledgeGraphDataset):
    >>> val_mask = g.edata['val_mask']
    >>> test_mask = g.edata['test_mask']
    >>>
-    >>> train_set = th.arange(g.number_of_edges())[train_mask]
-    >>> val_set = th.arange(g.number_of_edges())[val_mask]
+    >>> train_set = th.arange(g.num_edges())[train_mask]
+    >>> val_set = th.arange(g.num_edges())[val_mask]
    >>>
    >>> # build train_g
    >>> train_edges = train_set
@@ -577,8 +577,8 @@ class FB15kDataset(KnowledgeGraphDataset):
    >>> train_mask = g.edata['train_mask']
    >>> val_mask = g.edata['val_mask']
    >>>
-    >>> train_set = th.arange(g.number_of_edges())[train_mask]
-    >>> val_set = th.arange(g.number_of_edges())[val_mask]
+    >>> train_set = th.arange(g.num_edges())[train_mask]
+    >>> val_set = th.arange(g.num_edges())[val_mask]
    >>>
    >>> # build train_g
    >>> train_edges = train_set
@@ -693,8 +693,8 @@ class WN18Dataset(KnowledgeGraphDataset):
    >>> train_mask = g.edata['train_mask']
    >>> val_mask = g.edata['val_mask']
    >>>
-    >>> train_set = th.arange(g.number_of_edges())[train_mask]
-    >>> val_set = th.arange(g.number_of_edges())[val_mask]
+    >>> train_set = th.arange(g.num_edges())[train_mask]
+    >>> val_set = th.arange(g.num_edges())[val_mask]
    >>>
    >>> # build train_g
    >>> train_edges = train_set
...
@@ -266,10 +266,10 @@ class RDFGraphDataset(DGLBuiltinDataset):
        )
        train_mask = idx2mask(
-            train_idx, self._hg.number_of_nodes(self.predict_category)
+            train_idx, self._hg.num_nodes(self.predict_category)
        )
        test_mask = idx2mask(
-            test_idx, self._hg.number_of_nodes(self.predict_category)
+            test_idx, self._hg.num_nodes(self.predict_category)
        )
        labels = F.tensor(labels, F.data_type_dict["int64"])
@@ -313,8 +313,8 @@ class RDFGraphDataset(DGLBuiltinDataset):
        g.ndata[dgl.NTYPE] = F.tensor(ntid)
        g.edata[dgl.ETYPE] = F.tensor(etid)
        if self.verbose:
-            print("Total #nodes:", g.number_of_nodes())
-            print("Total #edges:", g.number_of_edges())
+            print("Total #nodes:", g.num_nodes())
+            print("Total #edges:", g.num_edges())
        # rename names such as 'type' so that they an be used as keys
        # to nn.ModuleDict
@@ -356,9 +356,7 @@ class RDFGraphDataset(DGLBuiltinDataset):
            Number of classes
        """
        label_dict = {}
-        labels = (
-            np.zeros((self._hg.number_of_nodes(self.predict_category),)) - 1
-        )
+        labels = np.zeros((self._hg.num_nodes(self.predict_category),)) - 1
        train_idx = self.parse_idx_file(
            os.path.join(root_path, "trainingSet.tsv"),
            ent2id,
...
@@ -168,8 +168,8 @@ class RedditDataset(DGLBuiltinDataset):
    def _print_info(self):
        if self.verbose:
            print("Finished data loading.")
-            print(" NumNodes: {}".format(self._graph.number_of_nodes()))
-            print(" NumEdges: {}".format(self._graph.number_of_edges()))
+            print(" NumNodes: {}".format(self._graph.num_nodes()))
+            print(" NumEdges: {}".format(self._graph.num_edges()))
            print(" NumFeats: {}".format(self._graph.ndata["feat"].shape[1]))
            print(" NumClasses: {}".format(self.num_classes))
            print(
...
@@ -183,7 +183,7 @@ class LegacyTUDataset(DGLBuiltinDataset):
        if "feat" not in g.ndata.keys():
            for idxs, g in zip(node_idx_list, self.graph_lists):
                g.ndata["feat"] = F.ones(
-                    (g.number_of_nodes(), self.hidden_size), F.float32, F.cpu()
+                    (g.num_nodes(), self.hidden_size), F.float32, F.cpu()
                )
            self.data_mode = "constant"
            if self.verbose:
@@ -201,7 +201,7 @@ class LegacyTUDataset(DGLBuiltinDataset):
        if self.verbose:
            print("original dataset length : ", len(self.graph_lists))
        for i, g in enumerate(self.graph_lists):
-            if g.number_of_nodes() <= self.max_allow_node:
+            if g.num_nodes() <= self.max_allow_node:
                preserve_idx.append(i)
        self.graph_lists = [self.graph_lists[i] for i in preserve_idx]
        if self.verbose:
...
@@ -103,7 +103,7 @@ class APPNPConv(nn.Block):
            feat = feat * norm
            graph.ndata["h"] = feat
            graph.edata["w"] = self.edge_drop(
-                nd.ones((graph.number_of_edges(), 1), ctx=feat.context)
+                nd.ones((graph.num_edges(), 1), ctx=feat.context)
            )
            graph.update_all(fn.u_mul_e("h", "w", "m"), fn.sum("m", "h"))
            feat = graph.ndata.pop("h")
...
@@ -235,7 +235,7 @@ class GMMConv(nn.Block):
            graph.srcdata["h"] = self.fc(feat_src).reshape(
                -1, self._n_kernels, self._out_feats
            )
-            E = graph.number_of_edges()
+            E = graph.num_edges()
            # compute gaussian weight
            gaussian = -0.5 * (
                (
...
@@ -174,7 +174,7 @@ class SAGEConv(nn.Block):
            h_self = feat_dst
            # Handle the case of graphs without edges
-            if graph.number_of_edges() == 0:
+            if graph.num_edges() == 0:
                dst_neigh = mx.nd.zeros(
                    (graph.number_of_dst_nodes(), self._in_src_feats)
                )
...
@@ -58,7 +58,7 @@ class HeteroGraphConv(nn.Block):
    ``'user'`` and ``'game'`` nodes.
    >>> import mxnet.ndarray as nd
-    >>> h1 = {'user' : nd.random.randn(g.number_of_nodes('user'), 5)}
+    >>> h1 = {'user' : nd.random.randn(g.num_nodes('user'), 5)}
    >>> h2 = conv(g, h1)
    >>> print(h2.keys())
    dict_keys(['user', 'game'])
...
@@ -186,7 +186,7 @@ class Sequential(gluon.nn.Sequential):
    >>> graph.ndata['h'] = n_feat
    >>> graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
    >>> n_feat += graph.ndata['h']
-    >>> return n_feat.reshape(graph.number_of_nodes() // 2, 2, -1).sum(1)
+    >>> return n_feat.reshape(graph.num_nodes() // 2, 2, -1).sum(1)
    >>>
    >>> g1 = dgl.DGLGraph(nx.erdos_renyi_graph(32, 0.05))
    >>> g2 = dgl.DGLGraph(nx.erdos_renyi_graph(16, 0.2))
...
@@ -109,7 +109,7 @@ class APPNPConv(nn.Module):
            feat = feat * src_norm
            graph.ndata["h"] = feat
            w = (
-                th.ones(graph.number_of_edges(), 1)
+                th.ones(graph.num_edges(), 1)
                if edge_weight is None
                else edge_weight
            )
...
@@ -297,5 +297,5 @@ class AtomicConv(nn.Module):
            graph.update_all(msg_func, reduce_func)
            return graph.ndata["hv_new"].view(
-                graph.number_of_nodes(), -1
+                graph.num_nodes(), -1
            )  # (V, K * T)
@@ -142,7 +142,7 @@ class GINConv(nn.Module):
        with graph.local_scope():
            aggregate_fn = fn.copy_u("h", "m")
            if edge_weight is not None:
-                assert edge_weight.shape[0] == graph.number_of_edges()
+                assert edge_weight.shape[0] == graph.num_edges()
                graph.edata["_edge_weight"] = edge_weight
                aggregate_fn = fn.u_mul_e("h", "_edge_weight", "m")
...
@@ -243,7 +243,7 @@ class GMMConv(nn.Module):
            graph.srcdata["h"] = self.fc(feat_src).view(
                -1, self._n_kernels, self._out_feats
            )
-            E = graph.number_of_edges()
+            E = graph.num_edges()
            # compute gaussian weight
            gaussian = -0.5 * (
                (
...
@@ -418,7 +418,7 @@ class GraphConv(nn.Module):
                )
            aggregate_fn = fn.copy_u("h", "m")
            if edge_weight is not None:
-                assert edge_weight.shape[0] == graph.number_of_edges()
+                assert edge_weight.shape[0] == graph.num_edges()
                graph.edata["_edge_weight"] = edge_weight
                aggregate_fn = fn.u_mul_e("h", "_edge_weight", "m")
...
@@ -214,14 +214,14 @@ class SAGEConv(nn.Module):
                feat_dst = feat_src[: graph.number_of_dst_nodes()]
            msg_fn = fn.copy_u("h", "m")
            if edge_weight is not None:
-                assert edge_weight.shape[0] == graph.number_of_edges()
+                assert edge_weight.shape[0] == graph.num_edges()
                graph.edata["_edge_weight"] = edge_weight
                msg_fn = fn.u_mul_e("h", "_edge_weight", "m")
            h_self = feat_dst
            # Handle the case of graphs without edges
-            if graph.number_of_edges() == 0:
+            if graph.num_edges() == 0:
                graph.dstdata["neigh"] = torch.zeros(
                    feat_dst.shape[0], self._in_src_feats
                ).to(feat_dst)
...
@@ -587,7 +587,7 @@ class TWIRLSUnfoldingAndAttention(nn.Module):
        """
        Y = X
-        g.edata["w"] = tc.ones(g.number_of_edges(), 1, device=g.device)
+        g.edata["w"] = tc.ones(g.num_edges(), 1, device=g.device)
        g.ndata["deg"] = g.in_degrees().to(X)
        if self.init_att:
...