"cacheflow/master/server.py" did not exist on "04e5acc08ed5b878225491bf62540ea10274fb29"
Unverified Commit 3c8ac093 authored by Hongzhi (Steve) Chen, committed by GitHub

[Misc] Rename number_of_edges and number_of_nodes to num_edges and num_nodes. (#5490)



* Other

* revert

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>
parent 8f9f2e2a
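The change is mechanical: every call site moves to the short-form accessors, which DGL provides alongside the long-form aliases. A minimal sketch of the two spellings (assuming any recent DGL release where both exist):

.. code:: python

    import torch
    import dgl

    # A toy 3-node, 2-edge graph for illustration.
    g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))

    # Short-form accessors, now used throughout the code base:
    print(g.num_nodes(), g.num_edges())              # 3 2

    # Long-form aliases still work but are no longer used:
    print(g.number_of_nodes(), g.number_of_edges())  # 3 2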
......@@ -27,7 +27,7 @@ DGL v0.6.0 experimentally supports distributed training for heterogeneous graphs
.. code:: python
- g.nodes['T0'].data['feat1'] = dgl.distributed.DistTensor((g.number_of_nodes('T0'), 1), th.float32, 'feat1',
+ g.nodes['T0'].data['feat1'] = dgl.distributed.DistTensor((g.num_nodes('T0'), 1), th.float32, 'feat1',
part_policy=g.get_node_partition_policy('T0'))
The partition policies used to create distributed tensors and distributed embeddings are initialized when the heterogeneous graph is loaded into the graph server. Users cannot create new partition policies at runtime; therefore, they can only create distributed tensors or distributed embeddings for an existing node type or edge type.
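A minimal sketch of this constraint (assuming ``g`` is a loaded ``dgl.distributed.DistGraph`` with a node type ``'T0'``, as in the snippet above):

.. code:: python

    import torch as th
    import dgl

    # The partition policy was fixed when the partitioned graph was
    # loaded; at runtime it can only be looked up, never created.
    policy = g.get_node_partition_policy('T0')

    feat1 = dgl.distributed.DistTensor(
        (g.num_nodes('T0'), 1), th.float32, 'feat1',
        part_policy=policy,
    )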
......
......@@ -283,7 +283,7 @@ The number of GNN layers must be passed to the superclass.
dst = dst[mask]
# Return a new graph with the same nodes as the original graph as a
# frontier
- frontier = dgl.graph((src, dst), num_nodes=g.number_of_nodes())
+ frontier = dgl.graph((src, dst), num_nodes=g.num_nodes())
return frontier
def __len__(self):
......@@ -335,7 +335,7 @@ Generating frontiers for heterogeneous graphs is the same as for homogeneous graphs
new_edges_masks = {}
# Iterate over all edge types
for etype in sg.canonical_etypes:
- edge_mask = torch.zeros(sg.number_of_edges(etype))
+ edge_mask = torch.zeros(sg.num_edges(etype))
edge_mask.bernoulli_(self.p)
new_edges_masks[etype] = edge_mask.bool()
......
......@@ -46,7 +46,7 @@
.. code:: python
- n_edges = g.number_of_edges()
+ n_edges = g.num_edges()
dataloader = dgl.dataloading.EdgeDataLoader(
g, train_eid_dict, sampler,
......
......@@ -43,13 +43,13 @@ To reduce the memory and time required to train GNNs on a GPU
"""
# Compute representations layer by layer
for l, layer in enumerate([self.conv1, self.conv2]):
- y = torch.zeros(g.number_of_nodes(),
+ y = torch.zeros(g.num_nodes(),
self.hidden_features
if l != self.n_layers - 1
else self.out_features)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
dataloader = dgl.dataloading.NodeDataLoader(
- g, torch.arange(g.number_of_nodes()), sampler,
+ g, torch.arange(g.num_nodes()), sampler,
batch_size=batch_size,
shuffle=True,
drop_last=False)
......
......@@ -55,7 +55,7 @@ Full-graph models for homogeneous or heterogeneous graphs
- Slice the first few rows of the input features to obtain the output nodes' features; the number of rows is given by :meth:`block.number_of_dst_nodes <dgl.DGLGraph.number_of_dst_nodes>`.
- If the original graph has a single node type, replace :attr:`g.ndata <dgl.DGLGraph.ndata>` with :attr:`block.srcdata <dgl.DGLGraph.srcdata>` for input node features or with :attr:`block.dstdata <dgl.DGLGraph.dstdata>` for output node features.
- If the original graph has multiple node types, replace :attr:`g.nodes <dgl.DGLGraph.nodes>` with :attr:`block.srcnodes <dgl.DGLGraph.srcnodes>` for input node features or with :attr:`block.dstnodes <dgl.DGLGraph.dstnodes>` for output node features.
- - Replace :meth:`g.number_of_nodes <dgl.DGLGraph.number_of_nodes>` with :meth:`block.number_of_src_nodes <dgl.DGLGraph.number_of_src_nodes>` for the number of input nodes and with :meth:`block.number_of_dst_nodes <dgl.DGLGraph.number_of_dst_nodes>` for the number of output nodes.
+ - Replace :meth:`g.num_nodes <dgl.DGLGraph.num_nodes>` with :meth:`block.number_of_src_nodes <dgl.DGLGraph.number_of_src_nodes>` for the number of input nodes and with :meth:`block.number_of_dst_nodes <dgl.DGLGraph.number_of_dst_nodes>` for the number of output nodes.
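A minimal sketch of these substitutions (using a hypothetical one-layer module built on ``dgl.nn.SAGEConv``, which accepts a ``(src_feat, dst_feat)`` feature pair):

.. code:: python

    import torch
    import dgl.nn as dglnn

    class OneLayer(torch.nn.Module):
        def __init__(self, in_feats, out_feats):
            super().__init__()
            self.conv = dglnn.SAGEConv(in_feats, out_feats, 'mean')

        def forward(self, block, x):
            # x holds features for the block's input (source) nodes; the
            # first block.number_of_dst_nodes() rows are the output nodes.
            h_dst = x[:block.number_of_dst_nodes()]
            return self.conv(block, (x, h_dst))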
Heterogeneous graphs
~~~~~~~~~~~~~~~~~~~~
......
......@@ -108,7 +108,7 @@ Wrap the GNN model's forward pass (including the loss computation) in ``torch.cuda.amp.autocast()``
in_feats = features.shape[1]
n_hidden = 256
n_classes = data.num_classes
- n_edges = g.number_of_edges()
+ n_edges = g.num_edges()
heads = [1, 1, 1]
model = GAT(in_feats, n_hidden, n_classes, heads)
model = model.to(device)
......
......@@ -466,14 +466,14 @@ def create_dgl_object(
)
part_graph.edata[dgl.EID] = th.arange(
edgeid_offset,
- edgeid_offset + part_graph.number_of_edges(),
+ edgeid_offset + part_graph.num_edges(),
dtype=th.int64,
)
part_graph.edata[dgl.ETYPE] = th.as_tensor(
etype_ids, dtype=RESERVED_FIELD_DTYPE[dgl.ETYPE]
)
part_graph.edata["inner_edge"] = th.ones(
- part_graph.number_of_edges(), dtype=RESERVED_FIELD_DTYPE["inner_edge"]
+ part_graph.num_edges(), dtype=RESERVED_FIELD_DTYPE["inner_edge"]
)
# compute per_type_ids and ntype for all the nodes in the graph.
......
......@@ -115,8 +115,8 @@ def _read_graph(schema):
)
# print from graph
logging.info(f"|V|= {g.number_of_nodes()}")
logging.info(f"|E|= {g.number_of_edges()}")
logging.info(f"|V|= {g.num_nodes()}")
logging.info(f"|E|= {g.num_edges()}")
for ntype in g.ntypes:
for name, data in g.nodes[ntype].data.items():
if isinstance(data, th.Tensor):
......
......@@ -88,19 +88,19 @@ g = dataset[0]
# Split edge set for training and testing
u, v = g.edges()
- eids = np.arange(g.number_of_edges())
+ eids = np.arange(g.num_edges())
eids = np.random.permutation(eids)
test_size = int(len(eids) * 0.1)
- train_size = g.number_of_edges() - test_size
+ train_size = g.num_edges() - test_size
test_pos_u, test_pos_v = u[eids[:test_size]], v[eids[:test_size]]
train_pos_u, train_pos_v = u[eids[test_size:]], v[eids[test_size:]]
# Find all negative edges and split them for training and testing
adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
- adj_neg = 1 - adj.todense() - np.eye(g.number_of_nodes())
+ adj_neg = 1 - adj.todense() - np.eye(g.num_nodes())
neg_u, neg_v = np.where(adj_neg != 0)
- neg_eids = np.random.choice(len(neg_u), g.number_of_edges())
+ neg_eids = np.random.choice(len(neg_u), g.num_edges())
test_neg_u, test_neg_v = (
neg_u[neg_eids[:test_size]],
neg_v[neg_eids[:test_size]],
......@@ -190,15 +190,11 @@ class GraphSAGE(nn.Module):
# for the training set and the test set respectively.
#
- train_pos_g = dgl.graph(
-     (train_pos_u, train_pos_v), num_nodes=g.number_of_nodes()
- )
- train_neg_g = dgl.graph(
-     (train_neg_u, train_neg_v), num_nodes=g.number_of_nodes()
- )
+ train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=g.num_nodes())
+ train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=g.num_nodes())
- test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=g.number_of_nodes())
- test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=g.number_of_nodes())
+ test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=g.num_nodes())
+ test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=g.num_nodes())
######################################################################
......
- '''
+ """
Distributed Node Classification
===============================
......@@ -47,11 +47,11 @@ the boolean arrays will be stored with the graph partitions.
splitted_idx = data.get_idx_split()
train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']
- train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+ train_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
train_mask[train_nid] = True
- val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+ val_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
val_mask[val_nid] = True
- test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
+ test_mask = th.zeros((graph.num_nodes(),), dtype=th.bool)
test_mask[test_nid] = True
graph.ndata['train_mask'] = train_mask
graph.ndata['val_mask'] = val_mask
......@@ -245,7 +245,7 @@ The code below defines the GraphSage model.
return x
num_hidden = 256
- num_labels = len(th.unique(g.ndata['labels'][0:g.number_of_nodes()]))
+ num_labels = len(th.unique(g.ndata['labels'][0:g.num_nodes()]))
num_layers = 2
lr = 0.001
model = SAGE(g.ndata['feat'].shape[1], num_hidden, num_labels, num_layers)
......@@ -436,4 +436,4 @@ If we split the graph into four partitions as demonstrated at the beginning of t
ip_addr3
ip_addr4
- '''
+ """
- '''
+ """
Distributed Link Prediction
===============================
......@@ -106,8 +106,8 @@ by invoking `node_split` and `edge_split`. We can also get the valid edges and t
.. code-block:: python
- train_eids = dgl.distributed.edge_split(th.ones((g.number_of_edges(),), dtype=th.bool), g.get_partition_book(), force_even=True)
- train_nids = dgl.distributed.node_split(th.ones((g.number_of_nodes(),), dtype=th.bool), g.get_partition_book())
+ train_eids = dgl.distributed.edge_split(th.ones((g.num_edges(),), dtype=th.bool), g.get_partition_book(), force_even=True)
+ train_nids = dgl.distributed.node_split(th.ones((g.num_nodes(),), dtype=th.bool), g.get_partition_book())
with open('4part_data/val.pkl', 'rb') as f:
global_valid_eid = pickle.load(f)
with open('4part_data/test.pkl', 'rb') as f:
......@@ -150,7 +150,7 @@ The code below defines the GraphSage model.
return x
num_hidden = 256
- num_labels = len(th.unique(g.ndata['labels'][0:g.number_of_nodes()]))
+ num_labels = len(th.unique(g.ndata['labels'][0:g.num_nodes()]))
num_layers = 2
lr = 0.001
model = SAGE(g.ndata['feat'].shape[1], num_hidden, num_labels, num_layers)
......@@ -243,7 +243,7 @@ In the inference stage, we use the model after training loop to get the embeddin
with th.no_grad():
sampler = dgl.dataloading.MultiLayerNeighborSampler([25,10])
train_dataloader = dgl.dataloading.DistNodeDataLoader(
- graph, th.arange(graph.number_of_nodes()), sampler,
+ graph, th.arange(graph.num_nodes()), sampler,
batch_size=1024,
shuffle=False,
drop_last=False)
......@@ -288,4 +288,4 @@ Set up distributed training environment
The distributed training environment setup is similar to that of distributed node classification. Please refer here for more details:
`Set up distributed training environment <https://docs.dgl.ai/en/latest/tutorials/dist/1_node_classification.html#set-up-distributed-training-environment>`_
- '''
+ """
......@@ -120,7 +120,7 @@ sampler = dgl.dataloading.as_edge_prediction_sampler(
train_dataloader = dgl.dataloading.DataLoader(
# The following arguments are specific to DataLoader.
graph, # The graph
- torch.arange(graph.number_of_edges()), # The edges to iterate over
+ torch.arange(graph.num_edges()), # The edges to iterate over
sampler, # The neighbor sampler
device=device, # Put the MFGs on CPU or GPU
# The following arguments are inherited from PyTorch DataLoader.
......@@ -140,15 +140,15 @@ input_nodes, pos_graph, neg_graph, mfgs = next(iter(train_dataloader))
print("Number of input nodes:", len(input_nodes))
print(
"Positive graph # nodes:",
- pos_graph.number_of_nodes(),
+ pos_graph.num_nodes(),
"# edges:",
- pos_graph.number_of_edges(),
+ pos_graph.num_edges(),
)
print(
"Negative graph # nodes:",
- neg_graph.number_of_nodes(),
+ neg_graph.num_nodes(),
"# edges:",
- neg_graph.number_of_edges(),
+ neg_graph.num_edges(),
)
print(mfgs)
......@@ -264,12 +264,12 @@ class DotPredictor(nn.Module):
def inference(model, graph, node_features):
with torch.no_grad():
- nodes = torch.arange(graph.number_of_nodes())
+ nodes = torch.arange(graph.num_nodes())
sampler = dgl.dataloading.NeighborSampler([4, 4])
train_dataloader = dgl.dataloading.DataLoader(
graph,
- torch.arange(graph.number_of_nodes()),
+ torch.arange(graph.num_nodes()),
sampler,
batch_size=1024,
shuffle=False,
......
......@@ -256,7 +256,7 @@ print(dgl.topological_nodes_generator(trv_graph))
import dgl.function as fn
import torch as th
trv_graph.ndata["a"] = th.ones(graph.number_of_nodes(), 1)
trv_graph.ndata["a"] = th.ones(graph.num_nodes(), 1)
traversal_order = dgl.topological_nodes_generator(trv_graph)
trv_graph.prop_nodes(
traversal_order,
......@@ -400,7 +400,7 @@ train_loader = DataLoader(
for epoch in range(epochs):
for step, batch in enumerate(train_loader):
g = batch.graph
- n = g.number_of_nodes()
+ n = g.num_nodes()
h = th.zeros((n, h_size))
c = th.zeros((n, h_size))
logits = model(batch, h, c)
......
......@@ -120,10 +120,10 @@ g.add_edges([2, 0], [0, 2]) # Add edges (2, 0), (0, 2)
def forward_inference(self):
stop = self.add_node_and_update()
- while (not stop) and (self.g.number_of_nodes() < self.v_max + 1):
+ while (not stop) and (self.g.num_nodes() < self.v_max + 1):
num_trials = 0
to_add_edge = self.add_edge_or_not()
- while to_add_edge and (num_trials < self.g.number_of_nodes() - 1):
+ while to_add_edge and (num_trials < self.g.num_nodes() - 1):
self.choose_dest_and_update()
num_trials += 1
to_add_edge = self.add_edge_or_not()
......@@ -174,7 +174,7 @@ def forward_inference(self):
# ax.cla()
# g_t = evolution[i]
# nx.draw_circular(g_t, with_labels=True, ax=ax,
- # node_color=['#FEBD69'] * g_t.number_of_nodes())
+ # node_color=['#FEBD69'] * g_t.num_nodes())
#
# fig, ax = plt.subplots()
# ani = animation.FuncAnimation(fig, animate,
......@@ -347,7 +347,7 @@ class GraphEmbed(nn.Module):
self.node_to_graph = nn.Linear(node_hidden_size, self.graph_hidden_size)
def forward(self, g):
- if g.number_of_nodes() == 0:
+ if g.num_nodes() == 0:
return torch.zeros(1, self.graph_hidden_size)
else:
# Node features are stored as hv in ndata.
......@@ -443,7 +443,7 @@ class GraphProp(nn.Module):
return {"a": node_activation}
def forward(self, g):
- if g.number_of_edges() > 0:
+ if g.num_edges() > 0:
for t in range(self.num_prop_rounds):
g.update_all(
message_func=self.dgmg_msg, reduce_func=self.reduce_funcs[t]
......@@ -514,7 +514,7 @@ class AddNode(nn.Module):
def _initialize_node_repr(self, g, node_type, graph_embed):
"""Whenver a node is added, initialize its representation."""
- num_nodes = g.number_of_nodes()
+ num_nodes = g.num_nodes()
hv_init = self.initialize_hv(
torch.cat(
[
......@@ -581,7 +581,7 @@ class AddEdge(nn.Module):
def forward(self, g, action=None):
graph_embed = self.graph_op["embed"](g)
- src_embed = g.nodes[g.number_of_nodes() - 1].data["hv"]
+ src_embed = g.nodes[g.num_nodes() - 1].data["hv"]
logit = self.add_edge(torch.cat([graph_embed, src_embed], dim=1))
prob = torch.sigmoid(logit)
......@@ -631,7 +631,7 @@ class ChooseDestAndUpdate(nn.Module):
self.log_prob = []
def forward(self, g, dest):
- src = g.number_of_nodes() - 1
+ src = g.num_nodes() - 1
possible_dests = range(src)
src_embed_expand = g.nodes[src].data["hv"].expand(src, -1)
......@@ -764,7 +764,7 @@ def is_valid(g):
else:
return i + 1
- size = g.number_of_nodes()
+ size = g.num_nodes()
if size < 10 or size > 20:
return False
......