Unverified commit 5008af22 authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes in examples. (#5492)



* pytorch_example

* fix

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 3c8ac093
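
For readers skimming the diff: on a `DGLGraph`, `num_nodes`/`num_edges` and `number_of_nodes`/`number_of_edges` return the same counts, so the hunks below are pure renames to the shorter spelling. A minimal sketch (not part of this commit):

```python
# Minimal sketch of the rename; assumes a recent DGL with the PyTorch backend.
import dgl
import torch

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))

# Both spellings are aliases; the examples now standardize on the short form.
assert g.num_nodes() == g.number_of_nodes() == 3
assert g.num_edges() == g.number_of_edges() == 3
```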
@@ -79,7 +79,7 @@ def main(args):
     test_mask = g.ndata["test_mask"]
     in_feats = features.shape[1]
     n_classes = data.num_labels
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
     print(
         """----Data statistics------'
       #Edges %d
@@ -98,7 +98,7 @@
     # graph preprocess and calculate normalization factor
     g = g.remove_self_loop().add_self_loop()
-    n_edges = g.number_of_edges()
+    n_edges = g.num_edges()
     us, vs = g.edges(order="eid")
     udeg, vdeg = 1 / torch.sqrt(g.in_degrees(us).float()), 1 / torch.sqrt(
         g.in_degrees(vs).float()
......
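The second hunk above computes the symmetric GCN normalization, weighting each edge (u, v) by 1/(sqrt(deg(u)) * sqrt(deg(v))). A self-contained sketch of the same computation, on a hypothetical toy graph:

```python
# Sketch of the per-edge symmetric normalization from the hunk above,
# on a hypothetical 3-node toy graph with self-loops normalized first.
import dgl
import torch

g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))
g = g.remove_self_loop().add_self_loop()
us, vs = g.edges(order="eid")
# weight(u, v) = 1 / (sqrt(deg(u)) * sqrt(deg(v)))
udeg = 1 / torch.sqrt(g.in_degrees(us).float())
vdeg = 1 / torch.sqrt(g.in_degrees(vs).float())
edge_weight = udeg * vdeg  # one weight per edge, in eid order
```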
@@ -122,7 +122,7 @@ def process_dataset_appnp(epsilon):
     test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()

     appnp = APPNPConv(k, alpha)
-    id = th.eye(graph.number_of_nodes()).float()
+    id = th.eye(graph.num_nodes()).float()
     diff_adj = appnp(graph.add_self_loop(), id).numpy()
     diff_adj[diff_adj < epsilon] = 0
......
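The `th.eye(...)` line above pushes the identity matrix through `APPNPConv`, so the output is the graph's dense approximate personalized-PageRank diffusion matrix, which the code then sparsifies by thresholding at `epsilon`. A hedged sketch with a toy graph and placeholder `k`/`alpha`/`epsilon` values:

```python
# Identity-matrix trick from the hunk above; k, alpha, and the threshold
# are placeholder values, not the example's settings.
import dgl
import torch as th
from dgl.nn import APPNPConv

graph = dgl.graph((th.tensor([0, 1]), th.tensor([1, 2])))
appnp = APPNPConv(k=3, alpha=0.1)
id = th.eye(graph.num_nodes()).float()  # one one-hot feature per node
diff_adj = appnp(graph.add_self_loop(), id).numpy()
diff_adj[diff_adj < 0.01] = 0           # sparsify, as the example does
```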
@@ -81,7 +81,7 @@ if __name__ == "__main__":
     val_idx = val_idx.to(args.device)
     test_idx = test_idx.to(args.device)
-    n_node = graph.number_of_nodes()
+    n_node = graph.num_nodes()

     lbl1 = th.ones(n_node * 2)
     lbl2 = th.zeros(n_node * 2)
     lbl = th.cat((lbl1, lbl2))
......
@@ -99,7 +99,7 @@ if __name__ == "__main__":
     val_idx = val_idx.to(args.device)
     test_idx = test_idx.to(args.device)
-    n_node = graph.number_of_nodes()
+    n_node = graph.num_nodes()

     sample_size = args.sample_size
......
@@ -258,7 +258,7 @@ if __name__ == "__main__":
     graph, labels = data[0]
     labels = labels[:, 0]
     num_nodes = train_idx.shape[0] + val_idx.shape[0] + test_idx.shape[0]
-    assert num_nodes == graph.number_of_nodes()
+    assert num_nodes == graph.num_nodes()
     graph.ndata["labels"] = labels
     mask = th.zeros(num_nodes, dtype=th.bool)
     mask[train_idx] = True
......
@@ -31,7 +31,7 @@ class DeepwalkTrainer:
            ogbl_name=args.ogbl_name,
            load_from_ogbl=args.load_from_ogbl,
        )
-        self.emb_size = self.dataset.G.number_of_nodes()
+        self.emb_size = self.dataset.G.num_nodes()
         self.emb_model = None

     def init_device_emb(self):
......
@@ -186,7 +186,7 @@ class DeepwalkDataset:
         self.save_mapping(map_file)

         self.G = net2graph(self.sm)
-        self.num_nodes = self.G.number_of_nodes()
+        self.num_nodes = self.G.num_nodes()

         # random walk seeds
         start = time.time()
......
@@ -30,7 +30,7 @@ class LineTrainer:
            load_from_ogbn=args.load_from_ogbn,
            num_samples=args.num_samples * 1000000,
        )
-        self.emb_size = self.dataset.G.number_of_nodes()
+        self.emb_size = self.dataset.G.num_nodes()
         self.emb_model = None

     def init_device_emb(self):
......
@@ -188,11 +188,11 @@ class LineDataset:
         self.G = make_undirected(self.G)
         print("Finish reading graph")

-        self.num_nodes = self.G.number_of_nodes()
+        self.num_nodes = self.G.num_nodes()

         start = time.time()
         seeds = np.random.choice(
-            np.arange(self.G.number_of_edges()), self.num_samples, replace=True
+            np.arange(self.G.num_edges()), self.num_samples, replace=True
         )  # edge index
         self.seeds = torch.split(
             torch.LongTensor(seeds),
......
@@ -42,9 +42,9 @@ def preprocess(graph):
     graph.add_edges(dsts, srcs)

     # add self-loop
-    print(f"Total edges before adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges before adding self-loop {graph.num_edges()}")
     graph = graph.remove_self_loop().add_self_loop()
-    print(f"Total edges after adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges after adding self-loop {graph.num_edges()}")

     graph.create_formats_()
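`remove_self_loop().add_self_loop()` guarantees exactly one self-loop per node, so the two prints above differ predictably. A toy check:

```python
# Toy check of the self-loop round-trip above: afterwards the edge count is
# (non-loop edges) + (one loop per node).
import dgl
import torch

graph = dgl.graph((torch.tensor([0, 0]), torch.tensor([0, 1])))  # one loop + one plain edge
print(f"Total edges before adding self-loop {graph.num_edges()}")  # 2
graph = graph.remove_self_loop().add_self_loop()
print(f"Total edges after adding self-loop {graph.num_edges()}")   # 1 + 2 nodes = 3
```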
@@ -98,7 +98,7 @@ def run(args, graph, labels, pred, train_idx, val_idx, test_idx, evaluator):
     y = pred.clone()
     y[train_idx] = F.one_hot(labels[train_idx], n_classes).float().squeeze(1)

-    # dy = torch.zeros(graph.number_of_nodes(), n_classes, device=device)
+    # dy = torch.zeros(graph.num_nodes(), n_classes, device=device)
     # dy[train_idx] = F.one_hot(labels[train_idx], n_classes).float().squeeze(1) - pred[train_idx]

     _train_acc, val_acc, test_acc = evaluate(
......
@@ -66,9 +66,9 @@ def preprocess(graph):
     graph.ndata["feat"] = feat

     # add self-loop
-    print(f"Total edges before adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges before adding self-loop {graph.num_edges()}")
     graph = graph.remove_self_loop().add_self_loop()
-    print(f"Total edges after adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges after adding self-loop {graph.num_edges()}")

     graph.create_formats_()
......
@@ -341,9 +341,9 @@ def main():
     graph.add_edges(dsts, srcs)

     # add self-loop
-    print(f"Total edges before adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges before adding self-loop {graph.num_edges()}")
     graph = graph.remove_self_loop().add_self_loop()
-    print(f"Total edges after adding self-loop {graph.number_of_edges()}")
+    print(f"Total edges after adding self-loop {graph.num_edges()}")

     in_feats = graph.ndata["feat"].shape[1]
     n_classes = (labels.max() + 1).item()
......
@@ -232,8 +232,8 @@ class GATConv(nn.Module):
             e = self.leaky_relu(graph.edata.pop("e"))

             if self.training and self.edge_drop > 0:
-                perm = torch.randperm(graph.number_of_edges(), device=e.device)
-                bound = int(graph.number_of_edges() * self.edge_drop)
+                perm = torch.randperm(graph.num_edges(), device=e.device)
+                bound = int(graph.num_edges() * self.edge_drop)
                 eids = perm[bound:]
                 graph.edata["a"] = torch.zeros_like(e)
                 graph.edata["a"][eids] = self.attn_drop(
......
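The hunk above is the example's edge-dropout pattern for GAT: shuffle the edge IDs, drop the first `num_edges * edge_drop` of them, and write attention values only for the survivors. A stand-alone sketch of the indexing, with hypothetical sizes:

```python
# Stand-alone sketch of the edge-dropout indexing above (hypothetical sizes).
import torch

num_edges, edge_drop = 10, 0.3
e = torch.randn(num_edges, 1)        # per-edge attention logits
perm = torch.randperm(num_edges)     # random order over edge IDs
bound = int(num_edges * edge_drop)   # how many edges to drop
eids = perm[bound:]                  # surviving edge IDs
a = torch.zeros_like(e)
a[eids] = e[eids]                    # dropped edges keep zero attention
```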
@@ -69,13 +69,11 @@ def preprocess(graph, labels, train_idx):
     n_node_feats = graph.ndata["feat"].shape[-1]

     graph.ndata["train_labels_onehot"] = torch.zeros(
-        graph.number_of_nodes(), n_classes
+        graph.num_nodes(), n_classes
     )
     graph.ndata["train_labels_onehot"][train_idx, labels[train_idx, 0]] = 1

-    graph.ndata["is_train"] = torch.zeros(
-        graph.number_of_nodes(), dtype=torch.bool
-    )
+    graph.ndata["is_train"] = torch.zeros(graph.num_nodes(), dtype=torch.bool)
     graph.ndata["is_train"][train_idx] = 1

     graph.create_formats_()
......
@@ -139,8 +139,8 @@ class GATConv(nn.Module):
             e = self.leaky_relu(e)

             if self.training and self.edge_drop > 0:
-                perm = torch.randperm(graph.number_of_edges(), device=e.device)
-                bound = int(graph.number_of_edges() * self.edge_drop)
+                perm = torch.randperm(graph.num_edges(), device=e.device)
+                bound = int(graph.num_edges() * self.edge_drop)
                 eids = perm[bound:]
                 graph.edata["a"] = torch.zeros_like(e)
                 graph.edata["a"][eids] = self.attn_drop(
......
@@ -68,7 +68,7 @@ def preprocess(graph, labels, train_idx):
     # Only the labels in the training set are used as features, while others are filled with zeros.
     graph.ndata["train_labels_onehot"] = torch.zeros(
-        graph.number_of_nodes(), n_classes
+        graph.num_nodes(), n_classes
     )
     graph.ndata["train_labels_onehot"][train_idx, labels[train_idx, 0]] = 1
     graph.ndata["deg"] = graph.out_degrees().float().clamp(min=1)
......
@@ -131,7 +131,7 @@ class MWE_GCN(nn.Module):
         self.device = device

     def forward(self, g, node_state=None):
-        node_state = torch.ones(g.number_of_nodes(), 1).float().to(self.device)
+        node_state = torch.ones(g.num_nodes(), 1).float().to(self.device)

         for layer in self.layers:
             node_state = F.dropout(
@@ -191,7 +191,7 @@ class MWE_DGCN(nn.Module):
         self.device = device

     def forward(self, g, node_state=None):
-        node_state = torch.ones(g.number_of_nodes(), 1).float().to(self.device)
+        node_state = torch.ones(g.num_nodes(), 1).float().to(self.device)

         node_state = self.layers[0](g, node_state)
@@ -350,8 +350,8 @@ class GATConv(nn.Module):
             e = self.leaky_relu(e)

             if self.training and self.edge_drop > 0:
-                perm = torch.randperm(graph.number_of_edges(), device=e.device)
-                bound = int(graph.number_of_edges() * self.edge_drop)
+                perm = torch.randperm(graph.num_edges(), device=e.device)
+                bound = int(graph.num_edges() * self.edge_drop)
                 eids = perm[bound:]
                 graph.edata["a"] = torch.zeros_like(e)
                 graph.edata["a"][eids] = self.attn_drop(
......
@@ -85,8 +85,8 @@ def load_dataset(name, device):
     evaluator = get_ogb_evaluator(name)

     print(
-        f"# Nodes: {g.number_of_nodes()}\n"
-        f"# Edges: {g.number_of_edges()}\n"
+        f"# Nodes: {g.num_nodes()}\n"
+        f"# Edges: {g.num_edges()}\n"
         f"# Train: {len(train_nid)}\n"
         f"# Val: {len(val_nid)}\n"
         f"# Test: {len(test_nid)}\n"
......
@@ -291,9 +291,7 @@ class RelGraphEmbed(nn.Module):

         # create weight embeddings for each node for each relation
         self.embeds = nn.ParameterDict()
         for ntype in g.ntypes:
-            embed = nn.Parameter(
-                th.Tensor(g.number_of_nodes(ntype), self.embed_size)
-            )
+            embed = nn.Parameter(th.Tensor(g.num_nodes(ntype), self.embed_size))
             nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain("relu"))
             self.embeds[ntype] = embed
@@ -408,7 +406,7 @@ class EntityClassify(nn.Module):
         for l, layer in enumerate(self.layers):
             y = {
                 k: th.zeros(
-                    g.number_of_nodes(k),
+                    g.num_nodes(k),
                     self.h_dim if l != len(self.layers) - 1 else self.out_dim,
                 )
                 for k in g.ntypes
@@ -417,7 +415,7 @@
             sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
             dataloader = dgl.dataloading.DataLoader(
                 g,
-                {k: th.arange(g.number_of_nodes(k)) for k in g.ntypes},
+                {k: th.arange(g.num_nodes(k)) for k in g.ntypes},
                 sampler,
                 batch_size=batch_size,
                 shuffle=True,
@@ -534,7 +532,7 @@ class EntityClassify_HeteroAPI(nn.Module):
         for l, layer in enumerate(self.layers):
             y = {
                 k: th.zeros(
-                    g.number_of_nodes(k),
+                    g.num_nodes(k),
                     self.h_dim if l != len(self.layers) - 1 else self.out_dim,
                 )
                 for k in g.ntypes
@@ -543,7 +541,7 @@
             sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
             dataloader = dgl.dataloading.DataLoader(
                 g,
-                {k: th.arange(g.number_of_nodes(k)) for k in g.ntypes},
+                {k: th.arange(g.num_nodes(k)) for k in g.ntypes},
                 sampler,
                 batch_size=batch_size,
                 shuffle=True,
......
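In the heterogeneous hunks above, `num_nodes` accepts a node type exactly as `number_of_nodes` did. A toy heterograph showing the typed form:

```python
# Toy heterograph for the typed num_nodes(ntype) calls used above.
import dgl
import torch as th

g = dgl.heterograph({
    ("user", "follows", "user"): (th.tensor([0, 1]), th.tensor([1, 2])),
    ("user", "plays", "game"): (th.tensor([0, 1]), th.tensor([0, 1])),
})
sizes = {k: g.num_nodes(k) for k in g.ntypes}  # {'game': 2, 'user': 3}
```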
@@ -318,7 +318,7 @@ class DistEmbedLayer(nn.Module):
                 if feat_name not in g.nodes[ntype].data:
                     part_policy = g.get_node_partition_policy(ntype)
                     self.node_embeds[ntype] = dgl.distributed.DistEmbedding(
-                        g.number_of_nodes(ntype),
+                        g.num_nodes(ntype),
                         self.embed_size,
                         embed_name + "_" + ntype,
                         init_emb,
@@ -330,7 +330,7 @@
                 # We only create embeddings for nodes without node features.
                 if feat_name not in g.nodes[ntype].data:
                     self.node_embeds[ntype] = th.nn.Embedding(
-                        g.number_of_nodes(ntype),
+                        g.num_nodes(ntype),
                         self.embed_size,
                         sparse=self.sparse_emb,
                     )
@@ -343,7 +343,7 @@
                 # We only create embeddings for nodes without node features.
                 if feat_name not in g.nodes[ntype].data:
                     self.node_embeds[ntype] = th.nn.Embedding(
-                        g.number_of_nodes(ntype), self.embed_size
+                        g.num_nodes(ntype), self.embed_size
                     )
                     nn.init.uniform_(self.node_embeds[ntype].weight, -1.0, 1.0)
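These `DistEmbedLayer` branches size a learnable table by `num_nodes(ntype)` for node types that lack input features. A minimal single-machine analogue of the `th.nn.Embedding` fallback (node count and embedding size are made up):

```python
# Single-machine analogue of the embedding fallback above (hypothetical sizes).
import torch as th

num_nodes, embed_size = 1000, 16
node_embed = th.nn.Embedding(num_nodes, embed_size, sparse=True)
th.nn.init.uniform_(node_embed.weight, -1.0, 1.0)
```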
@@ -410,7 +410,7 @@ def evaluate(
             assert len(logits) == 1
             logits = logits["paper"]
             eval_logits.append(logits.cpu().detach())
-            assert np.all(seeds.numpy() < g.number_of_nodes("paper"))
+            assert np.all(seeds.numpy() < g.num_nodes("paper"))
             eval_seeds.append(seeds.cpu().detach())
     eval_logits = th.cat(eval_logits)
     eval_seeds = th.cat(eval_seeds)
@@ -428,7 +428,7 @@ def evaluate(
             assert len(logits) == 1
             logits = logits["paper"]
             test_logits.append(logits.cpu().detach())
-            assert np.all(seeds.numpy() < g.number_of_nodes("paper"))
+            assert np.all(seeds.numpy() < g.num_nodes("paper"))
             test_seeds.append(seeds.cpu().detach())
     test_logits = th.cat(test_logits)
     test_seeds = th.cat(test_seeds)
@@ -769,21 +769,15 @@ def main(args):
     else:
         dev_id = g.rank() % args.num_gpus
         device = th.device("cuda:" + str(dev_id))
-    labels = g.nodes["paper"].data["labels"][
-        np.arange(g.number_of_nodes("paper"))
-    ]
+    labels = g.nodes["paper"].data["labels"][np.arange(g.num_nodes("paper"))]
     all_val_nid = th.LongTensor(
         np.nonzero(
-            g.nodes["paper"].data["val_mask"][
-                np.arange(g.number_of_nodes("paper"))
-            ]
+            g.nodes["paper"].data["val_mask"][np.arange(g.num_nodes("paper"))]
         )
     ).squeeze()
     all_test_nid = th.LongTensor(
         np.nonzero(
-            g.nodes["paper"].data["test_mask"][
-                np.arange(g.number_of_nodes("paper"))
-            ]
+            g.nodes["paper"].data["test_mask"][np.arange(g.num_nodes("paper"))]
        )
    ).squeeze()
     n_classes = len(th.unique(labels[labels >= 0]))
......