Unverified commit 492ad9be authored by Mufei Li, committed by GitHub

[Fix] Avoid Overloading Nonzero for PyTorch Backend (#2434)
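For context: newer PyTorch releases (roughly 1.5 onward) emit a deprecation warning when torch.nonzero is called without the as_tuple keyword. Passing as_tuple=False keeps the original return type, a 2-D LongTensor of indices, and silences the warning, which is all this commit does across the touched files. A minimal sketch (the mask tensor below is illustrative, not taken from the repository):

    import torch

    mask = torch.tensor([True, False, True, False])    # illustrative mask, not repo data

    # old style: still works, but newer PyTorch emits a deprecation warning
    idx_old = torch.nonzero(mask).squeeze()
    # style used throughout this commit: same result, no warning
    idx_new = torch.nonzero(mask, as_tuple=False).squeeze()

    assert torch.equal(idx_old, idx_new)               # both are tensor([0, 2])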

* Update gatedgraphconv.py

* Update entity_classify.py

* Update data-process.rst

* Update reading_data.py

* Update data-process.rst

* Update utils.py

* Update knowledge_graph.py

* Update entity_classify.py

* Update rdf.py

* Update entity_classify_mb.py

* Update test_classify.py

* Update tensor.py

* Update sparse.py

* Update entity_classify_mp.py

* Update 6_line_graph.py
parent 5c77b611
@@ -308,7 +308,7 @@ to see the complete code. The following code uses a subclass of ``KnowledgeGraph
     # get training mask
     train_mask = graph.edata['train_mask']
-    train_idx = torch.nonzero(train_mask).squeeze()
+    train_idx = torch.nonzero(train_mask, as_tuple=False).squeeze()
     src, dst = graph.edges(train_idx)
     # get edge types in training set
     rel = graph.edata['etype'][train_idx]
......
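For reference, the tuple-returning form is also available; as_tuple=False is the drop-in choice in these docs because downstream code expects a single index tensor to .squeeze() and slice. A hedged sketch with a made-up mask:

    import torch

    train_mask = torch.tensor([1, 0, 1, 1], dtype=torch.bool)     # hypothetical mask

    # as_tuple=False (used here): a 2-D index tensor, squeezed to 1-D
    train_idx = torch.nonzero(train_mask, as_tuple=False).squeeze()
    # as_tuple=True: a tuple with one 1-D index tensor per input dimension
    train_idx_alt = torch.nonzero(train_mask, as_tuple=True)[0]

    assert torch.equal(train_idx, train_idx_alt)                  # tensor([0, 2, 3])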
@@ -286,7 +286,7 @@ DGL recommends using node masks to specify the dataset split.
     # get the training set mask
     train_mask = graph.edata['train_mask']
-    train_idx = torch.nonzero(train_mask).squeeze()
+    train_idx = torch.nonzero(train_mask, as_tuple=False).squeeze()
     src, dst = graph.edges(train_idx)
     # get the edge types in the training set
......
@@ -114,7 +114,7 @@ def make_undirected(G):
     return G
 
 def find_connected_nodes(G):
-    nodes = torch.nonzero(G.out_degrees()).squeeze(-1)
+    nodes = torch.nonzero(G.out_degrees(), as_tuple=False).squeeze(-1)
     return nodes
 
 class LineDataset:
......
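A side note on the .squeeze(-1) in find_connected_nodes: it keeps the result 1-D even when only a single node has outgoing edges, where a bare .squeeze() would collapse to a 0-D scalar. A small sketch with an invented degree tensor:

    import torch

    out_degrees = torch.tensor([3])   # invented: a graph where only node 0 has out-edges

    # squeeze(-1) drops only the trailing dim, so a single match stays 1-D ...
    nodes = torch.nonzero(out_degrees, as_tuple=False).squeeze(-1)   # tensor([0])
    # ... whereas a bare squeeze() would collapse it to a 0-D scalar tensor.
    scalar = torch.nonzero(out_degrees, as_tuple=False).squeeze()    # tensor(0)

    assert nodes.dim() == 1 and scalar.dim() == 0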
@@ -30,8 +30,8 @@ def main(args):
     num_classes = dataset.num_classes
     train_mask = g.nodes[category].data.pop('train_mask')
     test_mask = g.nodes[category].data.pop('test_mask')
-    train_idx = th.nonzero(train_mask).squeeze()
-    test_idx = th.nonzero(test_mask).squeeze()
+    train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
+    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     labels = g.nodes[category].data.pop('labels')
     category_id = len(g.ntypes)
     for i, ntype in enumerate(g.ntypes):
......
@@ -60,8 +60,8 @@ def main(args):
     num_classes = dataset.num_classes
     train_mask = g.nodes[category].data.pop('train_mask')
     test_mask = g.nodes[category].data.pop('test_mask')
-    train_idx = th.nonzero(train_mask).squeeze()
-    test_idx = th.nonzero(test_mask).squeeze()
+    train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
+    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     labels = g.nodes[category].data.pop('labels')
     # split dataset into train, validate, test
......
@@ -25,7 +25,7 @@ def main(args):
     category = dataset.predict_category
     num_classes = dataset.num_classes
     test_mask = g.nodes[category].data.pop('test_mask')
-    test_idx = th.nonzero(test_mask).squeeze()
+    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     labels = g.nodes[category].data.pop('labels')
     # check cuda
......
@@ -63,8 +63,8 @@ def main(args):
     num_classes = dataset.num_classes
     train_mask = hg.nodes[category].data.pop('train_mask')
     test_mask = hg.nodes[category].data.pop('test_mask')
-    train_idx = torch.nonzero(train_mask).squeeze()
-    test_idx = torch.nonzero(test_mask).squeeze()
+    train_idx = torch.nonzero(train_mask, as_tuple=False).squeeze()
+    test_idx = torch.nonzero(test_mask, as_tuple=False).squeeze()
     labels = hg.nodes[category].data.pop('labels')
     # split dataset into train, validate, test
......
@@ -453,8 +453,8 @@ def main(args, devices):
     train_mask = hg.nodes[category].data.pop('train_mask')
     test_mask = hg.nodes[category].data.pop('test_mask')
     labels = hg.nodes[category].data.pop('labels')
-    train_idx = th.nonzero(train_mask).squeeze()
-    test_idx = th.nonzero(test_mask).squeeze()
+    train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
+    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     node_feats = [None] * num_of_ntype
     # AIFB, MUTAG, BGS and AM datasets do not provide validation set split.
......
@@ -175,7 +175,7 @@ def negative_sampling(pos_samples, num_entity, negative_rate):
 
 def sort_and_rank(score, target):
     _, indices = torch.sort(score, dim=1, descending=True)
-    indices = torch.nonzero(indices == target.view(-1, 1))
+    indices = torch.nonzero(indices == target.view(-1, 1), as_tuple=False)
     indices = indices[:, 1].view(-1)
     return indices
......
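In sort_and_rank, the nonzero call recovers, for each query row, the column at which the true entity appears after sorting by score, i.e. its 0-based rank. A toy check with made-up scores (not repository data):

    import torch

    # invented scores for 2 queries over 4 candidate entities, plus the true targets
    score = torch.tensor([[0.1, 0.9, 0.3, 0.2],
                          [0.8, 0.2, 0.1, 0.05]])
    target = torch.tensor([1, 2])

    _, indices = torch.sort(score, dim=1, descending=True)
    # row i of (indices == target[i]) holds exactly one True; its column is the rank
    rank = torch.nonzero(indices == target.view(-1, 1), as_tuple=False)[:, 1]
    print(rank)   # tensor([0, 2]): entity 1 is ranked 1st for query 0, entity 2 is 3rd for query 1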
@@ -30,7 +30,7 @@ def _reduce_grad(grad, shape):
     num_to_squeeze = len(grad_shape) - len(in_shape)
     # pad inshape
     in_shape = (1,) * num_to_squeeze + in_shape
-    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))
+    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape), as_tuple=False)
     reduce_idx += 1  # skip batch dim
     if len(reduce_idx) > 0:
         grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)
......
@@ -498,7 +498,7 @@ def _reduce_grad(grad, shape):
     num_to_squeeze = len(grad_shape) - len(in_shape)
     # pad inshape
     in_shape = (1,) * num_to_squeeze + in_shape
-    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))
+    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape), as_tuple=False)
     reduce_idx += 1  # skip batch dim
     grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)
     return grad.view(shape)
......
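In both _reduce_grad variants, nonzero locates the trailing dimensions that were broadcast in the forward pass so the gradient can be summed back to the input's shape. A simplified sketch with invented shapes (the real helpers also left-pad in_shape with ones when the ranks differ):

    import torch as th

    grad = th.ones(2, 3, 4)        # gradient w.r.t. the broadcast result
    shape = (2, 1, 4)              # shape of the original (un-broadcast) input

    grad_shape = grad.shape[1:]    # (3, 4): per-node part of the gradient
    in_shape = shape[1:]           # (1, 4): per-node part of the input
    # dimensions where the two shapes differ are exactly the broadcast ones
    reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape), as_tuple=False)
    reduce_idx += 1                # skip the leading node/edge dimension
    reduced = grad.sum(dim=tuple(reduce_idx), keepdim=True).view(shape)
    print(reduced.shape, reduced[0, 0, 0])   # torch.Size([2, 1, 4]) tensor(3.)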
@@ -342,7 +342,7 @@ class FB15k237Dataset(KnowledgeGraphDataset):
     >>> dataset = FB15k237Dataset()
     >>> graph = dataset[0]
     >>> train_mask = graph.edata['train_mask']
-    >>> train_idx = th.nonzero(train_mask).squeeze()
+    >>> train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(train_idx)
     >>> rel = graph.edata['etype'][train_idx]
@@ -351,7 +351,7 @@ class FB15k237Dataset(KnowledgeGraphDataset):
     >>> dataset = FB15k237Dataset()
     >>> graph = dataset[0]
     >>> val_mask = graph.edata['val_mask']
-    >>> val_idx = th.nonzero(val_mask).squeeze()
+    >>> val_idx = th.nonzero(val_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(val_idx)
     >>> rel = graph.edata['etype'][val_idx]
@@ -360,7 +360,7 @@ class FB15k237Dataset(KnowledgeGraphDataset):
     >>> dataset = FB15k237Dataset()
     >>> graph = dataset[0]
     >>> test_mask = graph.edata['test_mask']
-    >>> test_idx = th.nonzero(test_mask).squeeze()
+    >>> test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(test_idx)
     >>> rel = graph.edata['etype'][test_idx]
@@ -476,7 +476,7 @@ class FB15kDataset(KnowledgeGraphDataset):
     >>> dataset = FB15kDataset()
     >>> graph = dataset[0]
     >>> train_mask = graph.edata['train_mask']
-    >>> train_idx = th.nonzero(train_mask).squeeze()
+    >>> train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(train_idx)
     >>> rel = graph.edata['etype'][train_idx]
@@ -485,7 +485,7 @@ class FB15kDataset(KnowledgeGraphDataset):
     >>> dataset = FB15kDataset()
     >>> graph = dataset[0]
     >>> val_mask = graph.edata['val_mask']
-    >>> val_idx = th.nonzero(val_mask).squeeze()
+    >>> val_idx = th.nonzero(val_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(val_idx)
     >>> rel = graph.edata['etype'][val_idx]
@@ -494,7 +494,7 @@ class FB15kDataset(KnowledgeGraphDataset):
     >>> dataset = FB15kDataset()
     >>> graph = dataset[0]
     >>> test_mask = graph.edata['test_mask']
-    >>> test_idx = th.nonzero(test_mask).squeeze()
+    >>> test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(test_idx)
     >>> rel = graph.edata['etype'][test_idx]
@@ -613,7 +613,7 @@ class WN18Dataset(KnowledgeGraphDataset):
     >>> dataset = WN18Dataset()
     >>> graph = dataset[0]
     >>> train_mask = graph.edata['train_mask']
-    >>> train_idx = th.nonzero(train_mask).squeeze()
+    >>> train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(train_idx)
     >>> rel = graph.edata['etype'][train_idx]
@@ -622,7 +622,7 @@ class WN18Dataset(KnowledgeGraphDataset):
     >>> dataset = WN18Dataset()
     >>> graph = dataset[0]
     >>> val_mask = graph.edata['val_mask']
-    >>> val_idx = th.nonzero(val_mask).squeeze()
+    >>> val_idx = th.nonzero(val_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(val_idx)
     >>> rel = graph.edata['etype'][val_idx]
@@ -631,7 +631,7 @@ class WN18Dataset(KnowledgeGraphDataset):
     >>> dataset = WN18Dataset()
     >>> graph = dataset[0]
     >>> test_mask = graph.edata['test_mask']
-    >>> test_idx = th.nonzero(test_mask).squeeze()
+    >>> test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
     >>> src, dst = graph.edges(test_idx)
     >>> rel = graph.edata['etype'][test_idx]
......
@@ -557,14 +557,14 @@ class AIFBDataset(RDFGraphDataset):
     >>> dataset = AIFBDataset()
     >>> graph = dataset[0]
     >>> train_mask = graph.nodes[dataset.category].data['train_mask']
-    >>> train_idx = th.nonzero(train_mask).squeeze()
+    >>> train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
 
     - ``test_idx`` is deprecated, it can be replaced by:
 
     >>> dataset = AIFBDataset()
     >>> graph = dataset[0]
     >>> test_mask = graph.nodes[dataset.category].data['test_mask']
-    >>> test_idx = th.nonzero(test_mask).squeeze()
+    >>> test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
 
     AIFB DataSet is a Semantic Web (RDF) dataset used as a benchmark in
     data mining. It records the organizational structure of AIFB at the
......
@@ -147,7 +147,7 @@ class GatedGraphConv(nn.Module):
         for _ in range(self._n_steps):
             graph.ndata['h'] = feat
             for i in range(self._n_etypes):
-                eids = (etypes == i).nonzero().view(-1).type(graph.idtype)
+                eids = th.nonzero(etypes == i, as_tuple=False).view(-1).type(graph.idtype)
                 if len(eids) > 0:
                     graph.apply_edges(
                         lambda edges: {'W_e*h': self.linears[i](edges.src['h'])},
......
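This hunk also moves from the Tensor.nonzero() method to the functional th.nonzero so the as_tuple keyword can be passed. The call itself just gathers, per relation, the ids of edges with that edge type; a standalone sketch with a made-up etypes vector (the .type(graph.idtype) cast is dropped because no graph is involved here):

    import torch as th

    etypes = th.tensor([0, 2, 1, 0, 2])      # hypothetical edge-type id per edge

    # for each relation i, collect the ids of edges having that type
    for i in range(3):
        eids = th.nonzero(etypes == i, as_tuple=False).view(-1)
        print(i, eids)   # 0 -> tensor([0, 3]), 1 -> tensor([2]), 2 -> tensor([1, 4])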
@@ -92,12 +92,12 @@ G = dgl.DGLGraph(data.graph)
 labels = th.tensor(data.labels)
 
 # find all the nodes labeled with class 0
-label0_nodes = th.nonzero(labels == 0).squeeze()
+label0_nodes = th.nonzero(labels == 0, as_tuple=False).squeeze()
 # find all the edges pointing to class 0 nodes
 src, _ = G.in_edges(label0_nodes)
 src_labels = labels[src]
 # find all the edges whose both endpoints are in class 0
-intra_src = th.nonzero(src_labels == 0)
+intra_src = th.nonzero(src_labels == 0, as_tuple=False)
 print('Intra-class edges percent: %.4f' % (len(intra_src) / len(src_labels)))
 
 ###########################################################################################
......