Unverified Commit 5008af22 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes in examples. (#5492)



* pytorch_example

* fix

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 3c8ac093
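
For context: in recent DGL releases, DGLGraph.num_nodes()/num_edges() are the preferred spellings, and the older number_of_nodes()/number_of_edges() remain as aliases, so this rename is cosmetic and does not change behavior. A minimal sketch of the two spellings (assuming DGL and PyTorch are installed; the toy graph below is illustrative only, not from the commit):

import dgl
import torch

# A tiny directed graph with 3 nodes and 2 edges.
g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))

print(g.num_nodes())         # 3 -- preferred spelling used after this commit
print(g.num_edges())         # 2
print(g.number_of_nodes())   # 3 -- older alias being replaced in the examples

# On heterogeneous graphs both forms accept a node/edge type, e.g.
# hg.num_nodes("user") or hg.num_edges("rates").
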
@@ -52,7 +52,7 @@ def main(args):
test_mask = g.ndata["test_mask"]
in_feats = features.shape[1]
n_classes = data.num_labels
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
print(
"""----Data statistics------'
#Edges %d
@@ -69,7 +69,7 @@ def main(args):
)
)
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
# add self loop
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
......
@@ -77,7 +77,7 @@ def get_ppi():
test_dataset = PPIDataset(mode="test")
train_val_dataset = [i for i in train_dataset] + [i for i in val_dataset]
for idx, data in enumerate(train_val_dataset):
-data.ndata["batch"] = torch.zeros(data.number_of_nodes()) + idx
+data.ndata["batch"] = torch.zeros(data.num_nodes()) + idx
data.ndata["batch"] = data.ndata["batch"].long()
g = list(GraphDataLoader(train_val_dataset, batch_size=22, shuffle=True))
......
@@ -22,7 +22,7 @@ class CARESampler(dgl.dataloading.BlockSampler):
with g.local_scope():
new_edges_masks = {}
for etype in g.canonical_etypes:
-edge_mask = th.zeros(g.number_of_edges(etype))
+edge_mask = th.zeros(g.num_edges(etype))
# extract each node from dict because of single node type
for node in seed_nodes:
edges = g.in_edges(node, form="eid", etype=etype)
......
@@ -24,7 +24,7 @@ class Encoder(nn.Module):
def forward(self, features, corrupt=False):
if corrupt:
-perm = torch.randperm(self.g.number_of_nodes())
+perm = torch.randperm(self.g.num_nodes())
features = features[perm]
features = self.conv(features)
return features
......
@@ -38,7 +38,7 @@ def main(args):
test_mask = torch.ByteTensor(g.ndata["test_mask"])
in_feats = features.shape[1]
n_classes = data.num_classes
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
if args.gpu < 0:
cuda = False
@@ -55,7 +55,7 @@ def main(args):
if args.self_loop:
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
if args.gpu >= 0:
g = g.to(args.gpu)
......
@@ -22,7 +22,7 @@ def get_next(i, v_max):
def is_cycle(g):
-size = g.number_of_nodes()
+size = g.num_nodes()
if size < 3:
return False
@@ -102,7 +102,7 @@ class CycleDataset(Dataset):
def dglGraph_to_adj_list(g):
adj_list = {}
-for node in range(g.number_of_nodes()):
+for node in range(g.num_nodes()):
# For undirected graph. successors and
# predecessors are equivalent.
adj_list[node] = g.successors(node).tolist()
@@ -141,7 +141,7 @@ class CycleModelEvaluation(object):
sampled_adj_list = dglGraph_to_adj_list(sampled_graph)
adj_lists_to_plot.append(sampled_adj_list)
-graph_size = sampled_graph.number_of_nodes()
+graph_size = sampled_graph.num_nodes()
valid_size = self.v_min <= graph_size <= self.v_max
cycle = is_cycle(sampled_graph)
......
@@ -21,7 +21,7 @@ class GraphEmbed(nn.Module):
self.node_to_graph = nn.Linear(node_hidden_size, self.graph_hidden_size)
def forward(self, g):
-if g.number_of_nodes() == 0:
+if g.num_nodes() == 0:
return torch.zeros(1, self.graph_hidden_size)
else:
# Node features are stored as hv in ndata.
@@ -75,7 +75,7 @@ class GraphProp(nn.Module):
return {"a": node_activation}
def forward(self, g):
-if g.number_of_edges() == 0:
+if g.num_edges() == 0:
return
else:
for t in range(self.num_prop_rounds):
@@ -115,7 +115,7 @@ class AddNode(nn.Module):
self.init_node_activation = torch.zeros(1, 2 * node_hidden_size)
def _initialize_node_repr(self, g, node_type, graph_embed):
-num_nodes = g.number_of_nodes()
+num_nodes = g.num_nodes()
hv_init = self.initialize_hv(
torch.cat(
[
@@ -166,7 +166,7 @@ class AddEdge(nn.Module):
def forward(self, g, action=None):
graph_embed = self.graph_op["embed"](g)
-src_embed = g.nodes[g.number_of_nodes() - 1].data["hv"]
+src_embed = g.nodes[g.num_nodes() - 1].data["hv"]
logit = self.add_edge(torch.cat([graph_embed, src_embed], dim=1))
prob = torch.sigmoid(logit)
@@ -200,7 +200,7 @@ class ChooseDestAndUpdate(nn.Module):
self.log_prob = []
def forward(self, g, dest):
-src = g.number_of_nodes() - 1
+src = g.num_nodes() - 1
possible_dests = range(src)
src_embed_expand = g.nodes[src].data["hv"].expand(src, -1)
@@ -320,10 +320,10 @@ class DGMG(nn.Module):
def forward_inference(self):
stop = self.add_node_and_update()
-while (not stop) and (self.g.number_of_nodes() < self.v_max + 1):
+while (not stop) and (self.g.num_nodes() < self.v_max + 1):
num_trials = 0
to_add_edge = self.add_edge_or_not()
-while to_add_edge and (num_trials < self.g.number_of_nodes() - 1):
+while to_add_edge and (num_trials < self.g.num_nodes() - 1):
self.choose_dest_and_update()
num_trials += 1
to_add_edge = self.add_edge_or_not()
......
@@ -26,7 +26,7 @@ def pre_process(dataset, prog_args):
print("overwrite node attributes with DiffPool's preprocess setting")
if prog_args.data_mode == "id":
for g, _ in dataset:
-id_list = np.arange(g.number_of_nodes())
+id_list = np.arange(g.num_nodes())
g.ndata["feat"] = one_hotify(id_list, pad=dataset.max_num_node)
elif prog_args.data_mode == "deg-num":
......
@@ -148,7 +148,7 @@ class DiffPoolBatchedGraphLayer(nn.Module):
if self.link_pred:
current_lp_loss = torch.norm(
adj.to_dense() - torch.mm(assign_tensor, torch.t(assign_tensor))
-) / np.power(g.number_of_nodes(), 2)
+) / np.power(g.num_nodes(), 2)
self.loss_log["LinkPredLoss"] = current_lp_loss
for loss_layer in self.reg_loss:
......
@@ -90,7 +90,7 @@ def main(args):
test_mask = g.ndata["test_mask"]
num_feats = features.shape[1]
n_classes = data.num_labels
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
print(
"""----Data statistics------'
#Edges %d
@@ -110,7 +110,7 @@ def main(args):
# add self loop
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
-n_edges = g.number_of_edges()
+n_edges = g.num_edges()
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GATv2(
......
@@ -305,49 +305,49 @@ class MovieLens(object):
rst = 0
for r in self.possible_rating_values:
r = to_etype_name(r)
-rst += graph.number_of_edges(str(r))
+rst += graph.num_edges(str(r))
return rst
print(
"Train enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.train_enc_graph.number_of_nodes("user"),
-self.train_enc_graph.number_of_nodes("movie"),
+self.train_enc_graph.num_nodes("user"),
+self.train_enc_graph.num_nodes("movie"),
_npairs(self.train_enc_graph),
)
)
print(
"Train dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.train_dec_graph.number_of_nodes("user"),
-self.train_dec_graph.number_of_nodes("movie"),
-self.train_dec_graph.number_of_edges(),
+self.train_dec_graph.num_nodes("user"),
+self.train_dec_graph.num_nodes("movie"),
+self.train_dec_graph.num_edges(),
)
)
print(
"Valid enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.valid_enc_graph.number_of_nodes("user"),
-self.valid_enc_graph.number_of_nodes("movie"),
+self.valid_enc_graph.num_nodes("user"),
+self.valid_enc_graph.num_nodes("movie"),
_npairs(self.valid_enc_graph),
)
)
print(
"Valid dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.valid_dec_graph.number_of_nodes("user"),
-self.valid_dec_graph.number_of_nodes("movie"),
-self.valid_dec_graph.number_of_edges(),
+self.valid_dec_graph.num_nodes("user"),
+self.valid_dec_graph.num_nodes("movie"),
+self.valid_dec_graph.num_edges(),
)
)
print(
"Test enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.test_enc_graph.number_of_nodes("user"),
-self.test_enc_graph.number_of_nodes("movie"),
+self.test_enc_graph.num_nodes("user"),
+self.test_enc_graph.num_nodes("movie"),
_npairs(self.test_enc_graph),
)
)
print(
"Test dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
-self.test_dec_graph.number_of_nodes("user"),
-self.test_dec_graph.number_of_nodes("movie"),
-self.test_dec_graph.number_of_edges(),
+self.test_dec_graph.num_nodes("user"),
+self.test_dec_graph.num_nodes("movie"),
+self.test_dec_graph.num_edges(),
)
)
@@ -398,7 +398,7 @@ class MovieLens(object):
# sanity check
assert (
len(rating_pairs[0])
-== sum([graph.number_of_edges(et) for et in graph.etypes]) // 2
+== sum([graph.num_edges(et) for et in graph.etypes]) // 2
)
if add_support:
......
@@ -100,8 +100,8 @@ def load_subtensor(input_nodes, pair_graph, blocks, dataset, parent_graph):
def flatten_etypes(pair_graph, dataset, segment):
-n_users = pair_graph.number_of_nodes("user")
-n_movies = pair_graph.number_of_nodes("movie")
+n_users = pair_graph.num_nodes("user")
+n_movies = pair_graph.num_nodes("movie")
src = []
dst = []
labels = []
@@ -274,7 +274,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
dataset.train_enc_graph,
{
to_etype_name(k): th.arange(
-dataset.train_enc_graph.number_of_edges(etype=to_etype_name(k))
+dataset.train_enc_graph.num_edges(etype=to_etype_name(k))
)
for k in dataset.possible_rating_values
},
@@ -288,7 +288,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
if proc_id == 0:
valid_dataloader = dgl.dataloading.DataLoader(
dataset.valid_dec_graph,
-th.arange(dataset.valid_dec_graph.number_of_edges()),
+th.arange(dataset.valid_dec_graph.num_edges()),
sampler,
g_sampling=dataset.valid_enc_graph,
batch_size=args.minibatch_size,
@@ -297,7 +297,7 @@ def run(proc_id, n_gpus, args, devices, dataset):
)
test_dataloader = dgl.dataloading.DataLoader(
dataset.test_dec_graph,
-th.arange(dataset.test_dec_graph.number_of_edges()),
+th.arange(dataset.test_dec_graph.num_edges()),
sampler,
g_sampling=dataset.test_enc_graph,
batch_size=args.minibatch_size,
......
@@ -33,7 +33,7 @@ class GraphClsGGNN(nn.Module):
assert annotation.size()[-1] == self.annotation_size
-node_num = graph.number_of_nodes()
+node_num = graph.num_nodes()
zero_pad = torch.zeros(
[node_num, self.out_feats - self.annotation_size],
......
@@ -30,7 +30,7 @@ class NodeSelectionGGNN(nn.Module):
assert annotation.size()[-1] == self.annotation_size
-node_num = graph.number_of_nodes()
+node_num = graph.num_nodes()
zero_pad = torch.zeros(
[node_num, self.out_feats - self.annotation_size],
......
@@ -48,7 +48,7 @@ class GGSNN(nn.Module):
assert annotation.size()[-1] == self.annotation_size
-node_num = graph.number_of_nodes()
+node_num = graph.num_nodes()
all_logits = []
for _ in range(self.max_seq_length):
......
@@ -24,18 +24,18 @@ distance, node_mapping, edge_mapping = graph_edit_distance(
print(distance) # 1.0
# With user-input cost matrices
-node_substitution_cost = np.empty((G1.number_of_nodes(), G2.number_of_nodes()))
-G1_node_deletion_cost = np.empty(G1.number_of_nodes())
-G2_node_insertion_cost = np.empty(G2.number_of_nodes())
+node_substitution_cost = np.empty((G1.num_nodes(), G2.num_nodes()))
+G1_node_deletion_cost = np.empty(G1.num_nodes())
+G2_node_insertion_cost = np.empty(G2.num_nodes())
-edge_substitution_cost = np.empty((G1.number_of_edges(), G2.number_of_edges()))
-G1_edge_deletion_cost = np.empty(G1.number_of_edges())
-G2_edge_insertion_cost = np.empty(G2.number_of_edges())
+edge_substitution_cost = np.empty((G1.num_edges(), G2.num_edges()))
+G1_edge_deletion_cost = np.empty(G1.num_edges())
+G2_edge_insertion_cost = np.empty(G2.num_edges())
# Node substitution cost of 0 when node-ids are same, else 1
node_substitution_cost.fill(1.0)
-for i in range(G1.number_of_nodes()):
-for j in range(G2.number_of_nodes()):
+for i in range(G1.num_nodes()):
+for j in range(G2.num_nodes()):
node_substitution_cost[i, j] = 0.0
# Node insertion/deletion cost of 1
......
@@ -29,11 +29,11 @@ def validate_cost_functions(
Parameters : see graph_edit_distance
"""
-num_G1_nodes = G1.number_of_nodes()
-num_G2_nodes = G2.number_of_nodes()
+num_G1_nodes = G1.num_nodes()
+num_G2_nodes = G2.num_nodes()
-num_G1_edges = G1.number_of_edges()
-num_G2_edges = G2.number_of_edges()
+num_G1_edges = G1.num_edges()
+num_G2_edges = G2.num_edges()
# if any cost matrix is None, initialize it with default costs
if node_substitution_cost is None:
@@ -96,11 +96,11 @@ def construct_cost_functions(
Parameters : see graph_edit_distance
"""
-num_G1_nodes = G1.number_of_nodes()
-num_G2_nodes = G2.number_of_nodes()
+num_G1_nodes = G1.num_nodes()
+num_G2_nodes = G2.num_nodes()
-num_G1_edges = G1.number_of_edges()
-num_G2_edges = G2.number_of_edges()
+num_G1_edges = G1.num_edges()
+num_G2_edges = G2.num_edges()
# cost matrix of node mappings
cost_upper_bound = (
@@ -268,11 +268,11 @@ class search_tree_node:
self.matched_cost += cost_matrix_nodes[node_G1, node_G2]
elif node_G1 is not None: # Delete node_G1
self.matched_cost += cost_matrix_nodes[
-node_G1, node_G1 + G2.number_of_nodes()
+node_G1, node_G1 + G2.num_nodes()
]
elif node_G2 is not None: # Insert node_G2
self.matched_cost += cost_matrix_nodes[
-node_G2 + G1.number_of_nodes(), node_G2
+node_G2 + G1.num_nodes(), node_G2
]
# Add the cost of matching edges at this tree-node to the matched cost
@@ -299,8 +299,8 @@ class search_tree_node:
cost_matrix_edges,
incident_edges_G1,
incident_edges_G2,
-G1.number_of_edges(),
-G2.number_of_edges(),
+G1.num_edges(),
+G2.num_edges(),
)
max_sum = matched_edges_cost_matrix.sum()
# take care of impossible assignments by assigning maximum cost
@@ -339,7 +339,7 @@ class search_tree_node:
edge_deletion_cost = 0.0
for edge in incident_edges_G1:
edge_deletion_cost += cost_matrix_edges[
-edge, G2.number_of_edges() + edge
+edge, G2.num_edges() + edge
]
# Update matched edges
for edge in incident_edges_G1:
@@ -354,7 +354,7 @@ class search_tree_node:
edge_insertion_cost = 0.0
for edge in incident_edges_G2:
edge_insertion_cost += cost_matrix_edges[
-G1.number_of_edges() + edge, edge
+G1.num_edges() + edge, edge
]
# Update matched edges
for edge in incident_edges_G2:
@@ -372,8 +372,8 @@ class search_tree_node:
cost_matrix_nodes,
self.unprocessed_nodes_G1,
self.unprocessed_nodes_G2,
-G1.number_of_nodes(),
-G2.number_of_nodes(),
+G1.num_nodes(),
+G2.num_nodes(),
)
# Match the edges as per the LAP solution
row_ind, col_ind, _ = lapjv(unmatched_nodes_cost_matrix)
@@ -387,7 +387,7 @@ class search_tree_node:
node_deletion_cost = 0.0
for node in self.unprocessed_nodes_G1:
node_deletion_cost += cost_matrix_nodes[
-node, G2.number_of_nodes() + node
+node, G2.num_nodes() + node
]
self.future_approximate_cost += node_deletion_cost
@@ -396,7 +396,7 @@ class search_tree_node:
node_insertion_cost = 0.0
for node in self.unprocessed_nodes_G2:
node_insertion_cost += cost_matrix_nodes[
-G1.number_of_nodes() + node, node
+G1.num_nodes() + node, node
]
self.future_approximate_cost += node_insertion_cost
@@ -416,8 +416,8 @@ class search_tree_node:
cost_matrix_edges,
self.unprocessed_edges_G1,
self.unprocessed_edges_G2,
-G1.number_of_edges(),
-G2.number_of_edges(),
+G1.num_edges(),
+G2.num_edges(),
)
# Match the edges as per the LAP solution
row_ind, col_ind, _ = lapjv(unmatched_edges_cost_matrix)
@@ -431,7 +431,7 @@ class search_tree_node:
edge_deletion_cost = 0.0
for edge in self.unprocessed_edges_G1:
edge_deletion_cost += cost_matrix_edges[
-edge, G2.number_of_edges() + edge
+edge, G2.num_edges() + edge
]
self.future_approximate_cost += edge_deletion_cost
@@ -440,7 +440,7 @@ class search_tree_node:
edge_insertion_cost = 0.0
for edge in self.unprocessed_edges_G2:
edge_insertion_cost += cost_matrix_edges[
-G1.number_of_edges() + edge, edge
+G1.num_edges() + edge, edge
]
self.future_approximate_cost += edge_insertion_cost
@@ -481,16 +481,16 @@ def edit_cost_from_node_matching(
matched_nodes = ([], [])
matched_edges = ([], [])
# Add the cost of matching nodes
-for i in range(G1.number_of_nodes()):
+for i in range(G1.num_nodes()):
matched_cost += cost_matrix_nodes[i, node_matching[i]]
matched_nodes[0].append(i)
-if node_matching[i] < G2.number_of_nodes():
+if node_matching[i] < G2.num_nodes():
matched_nodes[1].append(node_matching[i])
else:
matched_nodes[1].append(None)
-for i in range(G1.number_of_nodes(), len(node_matching)):
+for i in range(G1.num_nodes(), len(node_matching)):
matched_cost += cost_matrix_nodes[i, node_matching[i]]
-if node_matching[i] < G2.number_of_nodes():
+if node_matching[i] < G2.num_nodes():
matched_nodes[0].append(None)
matched_nodes[1].append(node_matching[i])
@@ -519,8 +519,8 @@ def edit_cost_from_node_matching(
cost_matrix_edges,
incident_edges_G1,
incident_edges_G2,
-G1.number_of_edges(),
-G2.number_of_edges(),
+G1.num_edges(),
+G2.num_edges(),
)
max_sum = matched_edges_cost_matrix.sum()
# take care of impossible assignments by assigning maximum cost
@@ -557,7 +557,7 @@ def edit_cost_from_node_matching(
edge_deletion_cost = 0.0
for edge in incident_edges_G1:
edge_deletion_cost += cost_matrix_edges[
-edge, G2.number_of_edges() + edge
+edge, G2.num_edges() + edge
]
# Update matched edges
for edge in incident_edges_G1:
@@ -572,7 +572,7 @@ def edit_cost_from_node_matching(
edge_insertion_cost = 0.0
for edge in incident_edges_G2:
edge_insertion_cost += cost_matrix_edges[
-G1.number_of_edges() + edge, edge
+G1.num_edges() + edge, edge
]
# Update matched edges
for edge in incident_edges_G2:
@@ -597,11 +597,11 @@ def contextual_cost_matrix_construction(
# Calculates approximate GED using linear assignment on the nodes with bipartite algorithm
# cost matrix of node mappings
-num_G1_nodes = G1.number_of_nodes()
-num_G2_nodes = G2.number_of_nodes()
+num_G1_nodes = G1.num_nodes()
+num_G2_nodes = G2.num_nodes()
-num_G1_edges = G1.number_of_edges()
-num_G2_edges = G2.number_of_edges()
+num_G1_edges = G1.num_edges()
+num_G2_edges = G2.num_edges()
cost_upper_bound = 2 * (
node_substitution_cost.sum()
@@ -681,7 +681,7 @@ def contextual_cost_matrix_construction(
)
)
]
-for i in range(G1.number_of_nodes())
+for i in range(G1.num_nodes())
]
selected_insertion_G2 = [
G2_edge_insertion_cost[
@@ -693,7 +693,7 @@ def contextual_cost_matrix_construction(
)
)
]
-for i in range(G2.number_of_nodes())
+for i in range(G2.num_nodes())
]
# Add the cost of edge edition which are dependent of a node (see this as the cost associated with a substructure)
@@ -774,11 +774,11 @@ def hausdorff_matching(
# Calculates approximate GED using hausdorff_matching
# cost matrix of node mappings
-num_G1_nodes = G1.number_of_nodes()
-num_G2_nodes = G2.number_of_nodes()
+num_G1_nodes = G1.num_nodes()
+num_G2_nodes = G2.num_nodes()
-num_G1_edges = G1.number_of_edges()
-num_G2_edges = G2.number_of_edges()
+num_G1_edges = G1.num_edges()
+num_G2_edges = G2.num_edges()
self_edge_list_G1 = [np.array([], dtype=int)] * num_G1_nodes
self_edge_list_G2 = [np.array([], dtype=int)] * num_G2_nodes
@@ -816,29 +816,29 @@ def hausdorff_matching(
selected_deletion_self_G1 = [
G1_edge_deletion_cost[self_edge_list_G1[i]]
-for i in range(G1.number_of_nodes())
+for i in range(G1.num_nodes())
]
selected_insertion_self_G2 = [
G2_edge_insertion_cost[self_edge_list_G2[i]]
-for i in range(G2.number_of_nodes())
+for i in range(G2.num_nodes())
]
selected_deletion_incoming_G1 = [
G1_edge_deletion_cost[incoming_edges_G1[i]]
-for i in range(G1.number_of_nodes())
+for i in range(G1.num_nodes())
]
selected_insertion_incoming_G2 = [
G2_edge_insertion_cost[incoming_edges_G2[i]]
-for i in range(G2.number_of_nodes())
+for i in range(G2.num_nodes())
]
selected_deletion_outgoing_G1 = [
G1_edge_deletion_cost[outgoing_edges_G1[i]]
-for i in range(G1.number_of_nodes())
+for i in range(G1.num_nodes())
]
selected_insertion_outgoing_G2 = [
G2_edge_insertion_cost[outgoing_edges_G2[i]]
-for i in range(G2.number_of_nodes())
+for i in range(G2.num_nodes())
]
selected_deletion_G1 = [
@@ -851,7 +851,7 @@ def hausdorff_matching(
)
)
]
-for i in range(G1.number_of_nodes())
+for i in range(G1.num_nodes())
]
selected_insertion_G2 = [
G2_edge_insertion_cost[
@@ -863,7 +863,7 @@ def hausdorff_matching(
)
)
]
-for i in range(G2.number_of_nodes())
+for i in range(G2.num_nodes())
]
cost_G1 = np.array(
@@ -1001,16 +1001,16 @@ def a_star_search(G1, G2, cost_matrix_nodes, cost_matrix_edges, max_beam_size):
matched_edges = ([], [])
# No edges matched in the beginning
unprocessed_nodes_G1 = [
-i for i in range(G1.number_of_nodes())
+i for i in range(G1.num_nodes())
] # No nodes matched in the beginning
unprocessed_nodes_G2 = [
-i for i in range(G2.number_of_nodes())
+i for i in range(G2.num_nodes())
] # No nodes matched in the beginning
unprocessed_edges_G1 = [
-i for i in range(G1.number_of_edges())
+i for i in range(G1.num_edges())
] # No edges matched in the beginning
unprocessed_edges_G2 = [
-i for i in range(G2.number_of_edges())
+i for i in range(G2.num_edges())
] # No edges matched in the beginning
for i in range(len(unprocessed_nodes_G2)):
@@ -1278,12 +1278,8 @@ def graph_edit_distance(
)
return (
matched_cost,
-get_sorted_mapping(
-matched_nodes, G1.number_of_nodes(), G2.number_of_nodes()
-),
-get_sorted_mapping(
-matched_edges, G1.number_of_edges(), G2.number_of_edges()
-),
+get_sorted_mapping(matched_nodes, G1.num_nodes(), G2.num_nodes()),
+get_sorted_mapping(matched_edges, G1.num_edges(), G2.num_edges()),
)
elif algorithm == "hausdorff":
@@ -1324,10 +1320,6 @@ def graph_edit_distance(
return (
matched_cost,
-get_sorted_mapping(
-matched_nodes, G1.number_of_nodes(), G2.number_of_nodes()
-),
-get_sorted_mapping(
-matched_edges, G1.number_of_edges(), G2.number_of_edges()
-),
+get_sorted_mapping(matched_nodes, G1.num_nodes(), G2.num_nodes()),
+get_sorted_mapping(matched_edges, G1.num_edges(), G2.num_edges()),
)
@@ -3,11 +3,11 @@ import os
import sys
import time
-import dgl
import numpy as np
import torch as th
+import dgl
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from load_graph import load_ogb, load_reddit
@@ -65,7 +65,7 @@ if __name__ == "__main__":
print(
"load {} takes {:.3f} seconds".format(args.dataset, time.time() - start)
)
-print("|V|={}, |E|={}".format(g.number_of_nodes(), g.number_of_edges()))
+print("|V|={}, |E|={}".format(g.num_nodes(), g.num_edges()))
print(
"train: {}, valid: {}, test: {}".format(
th.sum(g.ndata["train_mask"]),
......
@@ -34,11 +34,11 @@ def load_ogb(name, root="dataset"):
splitted_idx["valid"],
splitted_idx["test"],
)
-train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+train_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
train_mask[train_nid] = True
-val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+val_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
val_mask[val_nid] = True
-test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+test_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
test_mask[test_nid] = True
graph.ndata["train_mask"] = train_mask
graph.ndata["val_mask"] = val_mask
......
@@ -154,7 +154,7 @@ def load_acm(remove_self_loop):
val_idx = torch.from_numpy(data["val_idx"]).long().squeeze(0)
test_idx = torch.from_numpy(data["test_idx"]).long().squeeze(0)
-num_nodes = author_g.number_of_nodes()
+num_nodes = author_g.num_nodes()
train_mask = get_binary_mask(num_nodes, train_idx)
val_mask = get_binary_mask(num_nodes, val_idx)
test_mask = get_binary_mask(num_nodes, test_idx)
@@ -238,7 +238,7 @@ def load_acm_raw(remove_self_loop):
val_idx = np.where((float_mask > 0.2) & (float_mask <= 0.3))[0]
test_idx = np.where(float_mask > 0.3)[0]
-num_nodes = hg.number_of_nodes("paper")
+num_nodes = hg.num_nodes("paper")
train_mask = get_binary_mask(num_nodes, train_idx)
val_mask = get_binary_mask(num_nodes, val_idx)
test_mask = get_binary_mask(num_nodes, test_idx)
......