"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "a43934371aa7fcbe41c27b9bb5ef94f4c01829fd"
Unverified Commit 5008af22 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes in examples. (#5492)



* pytorch_example

* fix

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 3c8ac093
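
The two spellings are equivalent in current DGL: `number_of_nodes`/`number_of_edges` remain as aliases of the shorter `num_nodes`/`num_edges`, so the rename is purely cosmetic. A minimal sketch of the equivalence (the toy graph below is illustrative, not taken from the changed examples):

```python
import dgl
import torch

# Build a tiny directed graph: 3 nodes, edges 0->1 and 1->2.
g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))

# The long and short spellings return the same counts; this commit only
# switches the examples to the shorter, preferred names.
assert g.number_of_nodes() == g.num_nodes() == 3
assert g.number_of_edges() == g.num_edges() == 2
```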
@@ -65,7 +65,7 @@ def main(args):
     test_mask = g.ndata["test_mask"]
     num_feats = features.shape[1]
     n_classes = data.num_labels
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
     print(
         """----Data statistics------'
       #Edges %d
@@ -85,7 +85,7 @@ def main(args):
     # add self loop
     g = dgl.remove_self_loop(g)
     g = dgl.add_self_loop(g)
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
     # create model
     heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
     model = HardGAT(
...
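
For reference, a small self-contained check of the remove/add self-loop pattern in the hunk above (toy graph, assuming the functional `dgl.remove_self_loop`/`dgl.add_self_loop` API):

```python
import dgl
import torch

# Toy graph with one pre-existing self-loop (0 -> 0).
g = dgl.graph((torch.tensor([0, 0, 1]), torch.tensor([0, 1, 2])))
g = dgl.remove_self_loop(g)  # drops 0 -> 0, leaving 2 edges
g = dgl.add_self_loop(g)     # adds exactly one loop per node
print(g.num_edges())  # 5: the 2 original edges plus 3 fresh self-loops
```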
@@ -199,7 +199,7 @@ class HGPSLPool(nn.Module):
         # top-k pool first
         if e_feat is None:
             e_feat = torch.ones(
-                (graph.number_of_edges(),), dtype=feat.dtype, device=feat.device
+                (graph.num_edges(),), dtype=feat.dtype, device=feat.device
             )
         batch_num_nodes = graph.batch_num_nodes()
         x_score = self.calc_info_score(graph, feat, e_feat)
...
@@ -120,14 +120,13 @@ for ntype in G.ntypes:
 for etype in G.etypes:
     edge_dict[etype] = len(edge_dict)
     G.edges[etype].data["id"] = (
-        torch.ones(G.number_of_edges(etype), dtype=torch.long)
-        * edge_dict[etype]
+        torch.ones(G.num_edges(etype), dtype=torch.long) * edge_dict[etype]
     )
 
 # Random initialize input feature
 for ntype in G.ntypes:
     emb = nn.Parameter(
-        torch.Tensor(G.number_of_nodes(ntype), 256), requires_grad=False
+        torch.Tensor(G.num_nodes(ntype), 256), requires_grad=False
     )
     nn.init.xavier_uniform_(emb)
     G.nodes[ntype].data["inp"] = emb
...
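
The typed counting calls above take a node or edge type name. A minimal heterograph sketch (the two-type schema below is invented purely to exercise the API):

```python
import dgl
import torch

# Invented schema with two node types and two relations.
G = dgl.heterograph({
    ("user", "follows", "user"): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    ("user", "plays", "game"): (torch.tensor([0, 2]), torch.tensor([0, 1])),
})
print(G.num_nodes("user"))   # 3, formerly G.number_of_nodes("user")
print(G.num_edges("plays"))  # 2, formerly G.number_of_edges("plays")
for etype in G.etypes:       # same iteration pattern as the hunk above
    print(etype, G.num_edges(etype))
```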
@@ -132,14 +132,14 @@ dataset = LanderDataset(
     features=features, labels=labels, k=args.knn_k, levels=1, faiss_gpu=False
 )
 g = dataset.gs[0]
-g.ndata["pred_den"] = torch.zeros((g.number_of_nodes()))
-g.edata["prob_conn"] = torch.zeros((g.number_of_edges(), 2))
+g.ndata["pred_den"] = torch.zeros((g.num_nodes()))
+g.edata["prob_conn"] = torch.zeros((g.num_edges(), 2))
 global_labels = labels.copy()
-ids = np.arange(g.number_of_nodes())
+ids = np.arange(g.num_nodes())
 global_edges = ([], [])
 global_peaks = np.array([], dtype=np.long)
 global_edges_len = len(global_edges[0])
-global_num_nodes = g.number_of_nodes()
+global_num_nodes = g.num_nodes()
 
 global_densities = g.ndata["density"][:linsize]
 global_densities = np.sort(global_densities)
@@ -150,7 +150,7 @@ sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
 # fix the number of edges
 test_loader = dgl.dataloading.DataLoader(
     g,
-    torch.arange(g.number_of_nodes()),
+    torch.arange(g.num_nodes()),
     sampler,
     batch_size=args.batch_size,
     shuffle=False,
@@ -222,7 +222,7 @@ for level in range(args.levels):
     if level == 0:
         global_pred_densities = g.ndata["pred_den"]
         global_densities = g.ndata["density"]
 
-    g.edata["prob_conn"] = torch.zeros((g.number_of_edges(), 2))
+    g.edata["prob_conn"] = torch.zeros((g.num_edges(), 2))
     ids = ids[peaks]
     new_global_edges_len = len(global_edges[0])
@@ -258,11 +258,11 @@ for level in range(args.levels):
         cluster_features=cluster_features,
     )
     g = dataset.gs[0]
-    g.ndata["pred_den"] = torch.zeros((g.number_of_nodes()))
-    g.edata["prob_conn"] = torch.zeros((g.number_of_edges(), 2))
+    g.ndata["pred_den"] = torch.zeros((g.num_nodes()))
+    g.edata["prob_conn"] = torch.zeros((g.num_edges(), 2))
     test_loader = dgl.dataloading.DataLoader(
         g,
-        torch.arange(g.number_of_nodes()),
+        torch.arange(g.num_nodes()),
         sampler,
         batch_size=args.batch_size,
         shuffle=False,
...
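
The loaders in these hunks all follow the same pattern: seed every node ID, then neighbor-sample a block per layer. A runnable sketch, assuming DGL >= 0.8 (where `dgl.dataloading.DataLoader` subsumed the older `NodeDataLoader`) and with a random graph standing in for `dataset.gs[0]`:

```python
import dgl
import torch

# Random graph standing in for dataset.gs[0]; 2-hop neighbor sampling.
g = dgl.rand_graph(100, 500)
sampler = dgl.dataloading.MultiLayerNeighborSampler([10, 10])
loader = dgl.dataloading.DataLoader(
    g,
    torch.arange(g.num_nodes()),  # seed every node, as the examples do
    sampler,
    batch_size=32,
    shuffle=False,
)
for input_nodes, output_nodes, blocks in loader:
    pass  # the model's forward pass over `blocks` would go here
```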
@@ -119,7 +119,7 @@ def set_train_sampler_loader(g, k):
     # fix the number of edges
     train_dataloader = dgl.dataloading.DataLoader(
         g,
-        torch.arange(g.number_of_nodes()),
+        torch.arange(g.num_nodes()),
         sampler,
         batch_size=args.batch_size,
         shuffle=True,
...
@@ -63,10 +63,10 @@ dataset = LanderDataset(
 )
 g = dataset.gs[0].to(device)
 global_labels = labels.copy()
-ids = np.arange(g.number_of_nodes())
+ids = np.arange(g.num_nodes())
 global_edges = ([], [])
 global_edges_len = len(global_edges[0])
-global_num_nodes = g.number_of_nodes()
+global_num_nodes = g.num_nodes()
 
 ##################
 # Model Definition
...
@@ -67,21 +67,21 @@ dataset = LanderDataset(
     faiss_gpu=args.faiss_gpu,
 )
 g = dataset.gs[0]
-g.ndata["pred_den"] = torch.zeros((g.number_of_nodes()))
-g.edata["prob_conn"] = torch.zeros((g.number_of_edges(), 2))
+g.ndata["pred_den"] = torch.zeros((g.num_nodes()))
+g.edata["prob_conn"] = torch.zeros((g.num_edges(), 2))
 global_labels = labels.copy()
-ids = np.arange(g.number_of_nodes())
+ids = np.arange(g.num_nodes())
 global_edges = ([], [])
 global_peaks = np.array([], dtype=np.long)
 global_edges_len = len(global_edges[0])
-global_num_nodes = g.number_of_nodes()
+global_num_nodes = g.num_nodes()
 
 fanouts = [args.knn_k - 1 for i in range(args.num_conv + 1)]
 sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
 # fix the number of edges
 test_loader = dgl.dataloading.DataLoader(
     g,
-    torch.arange(g.number_of_nodes()),
+    torch.arange(g.num_nodes()),
     sampler,
     batch_size=args.batch_size,
     shuffle=False,
@@ -183,11 +183,11 @@ for level in range(args.levels):
         cluster_features=cluster_features,
     )
     g = dataset.gs[0]
-    g.ndata["pred_den"] = torch.zeros((g.number_of_nodes()))
-    g.edata["prob_conn"] = torch.zeros((g.number_of_edges(), 2))
+    g.ndata["pred_den"] = torch.zeros((g.num_nodes()))
+    g.edata["prob_conn"] = torch.zeros((g.num_edges(), 2))
     test_loader = dgl.dataloading.DataLoader(
         g,
-        torch.arange(g.number_of_nodes()),
+        torch.arange(g.num_nodes()),
         sampler,
         batch_size=args.batch_size,
         shuffle=False,
...
@@ -83,7 +83,7 @@ def set_train_sampler_loader(g, k):
     # fix the number of edges
     train_dataloader = dgl.dataloading.DataLoader(
         g,
-        torch.arange(g.number_of_nodes()),
+        torch.arange(g.num_nodes()),
         sampler,
         batch_size=args.batch_size,
         shuffle=True,
...
@@ -97,7 +97,7 @@ def get_edge_dist(g, threshold):
 
 
 def tree_generation(ng):
-    ng.ndata["keep_eid"] = torch.zeros(ng.number_of_nodes()).long() - 1
+    ng.ndata["keep_eid"] = torch.zeros(ng.num_nodes()).long() - 1
 
     def message_func(edges):
         return {"mval": edges.data["edge_dist"], "meid": edges.data[dgl.EID]}
@@ -112,12 +112,12 @@ def tree_generation(ng):
     eids = ng.ndata["keep_eid"]
     eids = eids[eids > -1]
     edges = ng.find_edges(eids)
-    treeg = dgl.graph(edges, num_nodes=ng.number_of_nodes())
+    treeg = dgl.graph(edges, num_nodes=ng.num_nodes())
     return treeg
 
 
 def peak_propogation(treeg):
-    treeg.ndata["pred_labels"] = torch.zeros(treeg.number_of_nodes()).long() - 1
+    treeg.ndata["pred_labels"] = torch.zeros(treeg.num_nodes()).long() - 1
     peaks = torch.where(treeg.in_degrees() == 0)[0].cpu().numpy()
     treeg.ndata["pred_labels"][peaks] = torch.arange(peaks.shape[0])
@@ -157,7 +157,7 @@ def decode(
     ng = dgl.remove_edges(g, eids)
     # Tree generation
-    ng.edata[dgl.EID] = torch.arange(ng.number_of_edges())
+    ng.edata[dgl.EID] = torch.arange(ng.num_edges())
     treeg = tree_generation(ng)
 
     # Label propogation
     peaks, pred_labels = peak_propogation(treeg)
...
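
One reason these call sites pass `num_nodes=` explicitly: rebuilding a graph from a subset of edges would otherwise silently drop trailing isolated nodes. A small demonstration on a toy graph (not taken from the example):

```python
import dgl
import torch

# 5 nodes but only 2 edges; nodes 3 and 4 are isolated.
g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])), num_nodes=5)
src, dst = g.find_edges(torch.tensor([0]))  # keep only edge 0 -> 1
treeg = dgl.graph((src, dst), num_nodes=g.num_nodes())
print(treeg.num_nodes(), treeg.num_edges())  # 5 1: isolated nodes survive
```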
@@ -197,8 +197,8 @@ class JTNNCollator(object):
                 tree_mess_tgt_e[i] += n_graph_nodes
                 tree_mess_src_e[i] += n_tree_nodes
                 tree_mess_tgt_n[i] += n_graph_nodes
-            n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])
-            n_tree_nodes += mol_trees[i].graph.number_of_nodes()
+            n_graph_nodes += sum(g.num_nodes() for g in cand_graphs[i])
+            n_tree_nodes += mol_trees[i].graph.num_nodes()
             cand_batch_idx.extend([i] * len(cand_graphs[i]))
         tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)
         tree_mess_src_e = torch.cat(tree_mess_src_e)
...
@@ -225,8 +225,8 @@ class DGLJTMPN(nn.Module):
             cand_graphs, backtracking=False, shared=True
         )
 
-        n_nodes = cand_graphs.number_of_nodes()
-        n_edges = cand_graphs.number_of_edges()
+        n_nodes = cand_graphs.num_nodes()
+        n_edges = cand_graphs.num_edges()
 
         cand_graphs = self.run(
             cand_graphs,
@@ -255,7 +255,7 @@ class DGLJTMPN(nn.Module):
         tree_mess_tgt_nodes,
         mol_tree_batch,
     ):
-        n_nodes = cand_graphs.number_of_nodes()
+        n_nodes = cand_graphs.num_nodes()
 
         cand_graphs.apply_edges(
             func=lambda edges: {"src_x": edges.src["x"]},
@@ -282,7 +282,7 @@ class DGLJTMPN(nn.Module):
         )
         cand_graphs.edata["alpha"] = cuda(
-            torch.zeros(cand_graphs.number_of_edges(), self.hidden_size)
+            torch.zeros(cand_graphs.num_edges(), self.hidden_size)
         )
         cand_graphs.ndata["alpha"] = zero_node_state
 
         if tree_mess_src_edges.shape[0] > 0:
...
@@ -126,8 +126,8 @@ class DGLJTNNDecoder(nn.Module):
             np.insert(mol_tree_batch.batch_num_nodes().cpu().numpy(), 0, 0)
         )
         root_ids = node_offset[:-1]
-        n_nodes = mol_tree_batch.number_of_nodes()
-        n_edges = mol_tree_batch.number_of_edges()
+        n_nodes = mol_tree_batch.num_nodes()
+        n_edges = mol_tree_batch.num_edges()
 
         mol_tree_batch.ndata.update(
             {
...
@@ -68,8 +68,8 @@ class DGLJTNNEncoder(nn.Module):
             np.insert(mol_tree_batch.batch_num_nodes().cpu().numpy(), 0, 0)
         )
         root_ids = node_offset[:-1]
-        n_nodes = mol_tree_batch.number_of_nodes()
-        n_edges = mol_tree_batch.number_of_edges()
+        n_nodes = mol_tree_batch.num_nodes()
+        n_edges = mol_tree_batch.num_edges()
 
         # Assign structure embeddings to tree nodes
         mol_tree_batch.ndata.update(
...
@@ -70,10 +70,10 @@ class DGLJTNNVAE(nn.Module):
             [t.graph for t in mol_batch["mol_trees"]]
         )
 
-        self.n_nodes_total += mol_graphs.number_of_nodes()
-        self.n_edges_total += mol_graphs.number_of_edges()
+        self.n_nodes_total += mol_graphs.num_nodes()
+        self.n_edges_total += mol_graphs.num_edges()
         self.n_tree_nodes_total += sum(
-            t.graph.number_of_nodes() for t in mol_batch["mol_trees"]
+            t.graph.num_nodes() for t in mol_batch["mol_trees"]
         )
         self.n_passes += 1
...
@@ -72,7 +72,7 @@ class DGLMolTree(object):
         self.nodes_dict[i]["is_leaf"] = self.graph.out_degrees(i) == 1
 
     def treesize(self):
-        return self.graph.number_of_nodes()
+        return self.graph.num_nodes()
 
     def _recover_node(self, i, original_mol):
         node = self.nodes_dict[i]
...
@@ -154,8 +154,8 @@ class DGLMPN(nn.Module):
         mol_line_graph = line_graph(mol_graph, backtracking=False, shared=True)
 
-        n_nodes = mol_graph.number_of_nodes()
-        n_edges = mol_graph.number_of_edges()
+        n_nodes = mol_graph.num_nodes()
+        n_edges = mol_graph.num_edges()
 
         mol_graph = self.run(mol_graph, mol_line_graph)
@@ -170,7 +170,7 @@ class DGLMPN(nn.Module):
         return g_repr
 
     def run(self, mol_graph, mol_line_graph):
-        n_nodes = mol_graph.number_of_nodes()
+        n_nodes = mol_graph.num_nodes()
 
         mol_graph.apply_edges(
             func=lambda edges: {"src_x": edges.src["x"]},
...
@@ -50,4 +50,4 @@ def tocpu(g):
     src, dst = g.edges()
     src = src.cpu()
     dst = dst.cpu()
-    return dgl.graph((src, dst), num_nodes=g.number_of_nodes())
+    return dgl.graph((src, dst), num_nodes=g.num_nodes())
@@ -94,7 +94,7 @@ def generate_metapath():
     hg, author_names, conf_names, paper_names = construct_graph()
 
-    for conf_idx in tqdm.trange(hg.number_of_nodes("conf")):
+    for conf_idx in tqdm.trange(hg.num_nodes("conf")):
         traces, _ = dgl.sampling.random_walk(
             hg,
             [conf_idx] * num_walks_per_node,
...
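
The walk above strings edge types into a metapath starting from each conference node. A toy sketch of `dgl.sampling.random_walk` with an invented schema (the real graph comes from `construct_graph()`):

```python
import dgl
import torch

# Invented conf/paper/author schema, sized so every walk can complete.
hg = dgl.heterograph({
    ("conf", "cp", "paper"): (torch.tensor([0, 0]), torch.tensor([0, 1])),
    ("paper", "pa", "author"): (torch.tensor([0, 1]), torch.tensor([0, 1])),
    ("author", "ap", "paper"): (torch.tensor([0, 1]), torch.tensor([0, 1])),
    ("paper", "pc", "conf"): (torch.tensor([0, 1]), torch.tensor([0, 0])),
})
# Two walks from conf 0 along conf -> paper -> author -> paper -> conf.
traces, types = dgl.sampling.random_walk(
    hg, [0, 0], metapath=["cp", "pa", "ap", "pc"]
)
print(traces.shape)  # torch.Size([2, 5]); -1 would mark early-stopped walks
```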
@@ -62,7 +62,7 @@ def main(args):
     test_mask = g.ndata["test_mask"]
     in_feats = features.shape[1]
     n_classes = data.num_labels
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
     print(
         """----Data statistics------'
       #Edges %d
@@ -83,7 +83,7 @@ def main(args):
     # add self loop
     if args.self_loop:
         g = g.remove_self_loop().add_self_loop()
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
 
     # normalization
     degs = g.in_degrees().float()
...
@@ -8,7 +8,7 @@ def get_coordinates(graphs, grid_side, coarsening_levels, perm):
     rst = []
     for l in range(coarsening_levels + 1):
         xs, ys = [], []
-        for i in range(graphs[l].number_of_nodes()):
+        for i in range(graphs[l].num_nodes()):
             cnt = eps
             x_accum = 0
             y_accum = 0
...