Unverified commit 597ac7f8, authored by Minjie Wang, committed by GitHub

[Release] v0.1.3 (#288)

* 0.1.2 release

* oops

* more fixes on windows

* [Bugfix] fix download dir (#275)

* fix download dir

* add doc for the env var

* windows 7 -> 10

* doc update

* [Bugfix] Fix conversion from networkx (#286)

* fix from_nx when no edge id available

* add test cases

* more detailed tests

* more comments

* [Bugfix] Switch to sparse_coo_matrix for torch 1.0+ (#282)

* switch to sparse_coo_matrix for torch 1.0+

* fix bug when the version is 0.4.1.post2

* change to distutils
parent 8bc01c63
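
The PyTorch backend change in this commit gates the sparse-tensor constructor on the installed torch version (the "switch to sparse_coo_matrix for torch 1.0+" and "change to distutils" items above). As a quick orientation, here is a minimal standalone sketch of that gate; it is not part of the diff, and make_coo is an illustrative name only.

from distutils.version import LooseVersion
import torch as th

# pick the COO constructor based on the major torch version
if LooseVersion(th.__version__).version[0] == 0:
    # torch 0.4.x: private helper that skips the boundary check
    make_coo = th._sparse_coo_tensor_unsafe
else:
    # torch 1.0+: public constructor
    make_coo = th.sparse_coo_tensor
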
from __future__ import absolute_import
from distutils.version import LooseVersion
import torch as th
from torch.utils import dlpack
TH_VERSION = LooseVersion(th.__version__)
def data_type_dict():
    return {'float16' : th.float16,
            'float32' : th.float32,
......@@ -19,14 +23,24 @@ def cpu():
def tensor(data, dtype=None):
    return th.tensor(data, dtype=dtype)

def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt != 'coo':
        raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
    # NOTE: use _sparse_coo_tensor_unsafe to avoid unnecessary boundary check
    spmat = th._sparse_coo_tensor_unsafe(index[1], data, shape)
    # No conversion is required.
    return spmat, None

if TH_VERSION.version[0] == 0:
    def sparse_matrix(data, index, shape, force_format=False):
        fmt = index[0]
        if fmt != 'coo':
            raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
        # NOTE: use _sparse_coo_tensor_unsafe to avoid unnecessary boundary check
        spmat = th._sparse_coo_tensor_unsafe(index[1], data, shape)
        # No conversion is required.
        return spmat, None
else:
    # VERSION 1.0+
    def sparse_matrix(data, index, shape, force_format=False):
        fmt = index[0]
        if fmt != 'coo':
            raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
        spmat = th.sparse_coo_tensor(index[1], data, shape)
        # No conversion is required.
        return spmat, None

def sparse_matrix_indices(spmat):
    return ('coo', spmat._indices())
......
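
A hedged usage sketch of the backend helper defined above (not part of the diff): the index argument is a ('coo', indices) pair where indices stacks the row and column ids, matching the format check in sparse_matrix.

import torch as th

# assumes `sparse_matrix` is the function from the hunk above, imported or in scope
row = th.tensor([0, 1, 2])
col = th.tensor([1, 2, 0])
dat = th.ones(3)
spmat, _ = sparse_matrix(dat, ('coo', th.stack([row, col])), (3, 3))
print(spmat.to_dense())  # 3x3 dense view with ones at (0,1), (1,2), (2,0)
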
......@@ -1167,6 +1167,7 @@ class DGLGraph(object):
            else:
                return F.tensor(lst)
        if node_attrs is not None:
            # mapping from feature name to a list of tensors to be concatenated
            attr_dict = defaultdict(list)
            for nid in range(self.number_of_nodes()):
                for attr in node_attrs:
......@@ -1175,14 +1176,19 @@ class DGLGraph(object):
                self._node_frame[attr] = _batcher(attr_dict[attr])
        if edge_attrs is not None:
            has_edge_id = 'id' in next(iter(nx_graph.edges(data=True)))[-1]
            # mapping from feature name to a list of tensors to be concatenated
            attr_dict = defaultdict(lambda: [None] * self.number_of_edges())
            # each defaultdict value is initialized to be a list of None
            # None here serves as placeholder to be replaced by feature with
            # corresponding edge id
            if has_edge_id:
                for _, _, attrs in nx_graph.edges(data=True):
                    for key in edge_attrs:
                        attr_dict[key][attrs['id']] = attrs[key]
            else:
                # XXX: assuming networkx iteration order is deterministic
                for eid, (_, _, attr) in enumerate(nx_graph.edges(data=True)):
                #      so the order is the same as graph_index.from_networkx
                for eid, (_, _, attrs) in enumerate(nx_graph.edges(data=True)):
                    for key in edge_attrs:
                        attr_dict[key][eid] = attrs[key]
            for attr in edge_attrs:
......
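
To make the edge-feature branch above easier to follow, here is a small self-contained sketch of the same placement logic; collect_edge_attrs is a hypothetical helper written for illustration, not a DGL function.

from collections import defaultdict

def collect_edge_attrs(nx_graph, edge_attrs, num_edges):
    # one placeholder list per feature name; the None entries are filled below
    attr_dict = defaultdict(lambda: [None] * num_edges)
    has_edge_id = 'id' in next(iter(nx_graph.edges(data=True)))[-1]
    if has_edge_id:
        # an explicit 'id' attribute fixes each feature's position
        for _, _, attrs in nx_graph.edges(data=True):
            for key in edge_attrs:
                attr_dict[key][attrs['id']] = attrs[key]
    else:
        # no 'id': fall back to nx_graph.edges() iteration order
        for eid, (_, _, attrs) in enumerate(nx_graph.edges(data=True)):
            for key in edge_attrs:
                attr_dict[key][eid] = attrs[key]
    return attr_dict
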
......@@ -142,6 +142,8 @@ def test_nx_conversion():
    # check conversion between networkx and DGLGraph

    def _check_nx_feature(nxg, nf, ef):
        # check node and edge feature of nxg
        # this is used to check to_networkx
        num_nodes = len(nxg)
        num_edges = nxg.size()
        if num_nodes > 0:
......@@ -185,18 +187,24 @@ def test_nx_conversion():
    assert nxg.size() == 4
    _check_nx_feature(nxg, {'n1': n1, 'n3': n3}, {'e1': e1, 'e2': e2})
    # convert to DGLGraph
    # convert to DGLGraph, nx graph has id in edge feature
    # use id feature to test non-tensor copy
    g.from_networkx(nxg, node_attrs=['n1'], edge_attrs=['e1', 'id'])
    # check graph size
    assert g.number_of_nodes() == 5
    assert g.number_of_edges() == 4
    assert U.allclose(g.get_n_repr()['n1'], n1)
    assert U.allclose(g.get_e_repr()['e1'], e1)
    # check number of features
    # test with existing dglgraph (so existing features should be cleared)
    assert len(g.ndata) == 1
    assert len(g.edata) == 2
    # check feature values
    assert U.allclose(g.ndata['n1'], n1)
    # with id in nx edge feature, e1 should follow original order
    assert U.allclose(g.edata['e1'], e1)
    assert th.equal(g.get_e_repr()['id'], th.arange(4))
    g.pop_e_repr('id')
    # test modifying DGLGraph
    # test conversion after modifying DGLGraph
    g.pop_e_repr('id') # pop id so we don't need to provide id when adding edges
    new_n = th.randn(2, 3)
    new_e = th.randn(3, 5)
    g.add_nodes(2, data={'n1': new_n})
......@@ -210,6 +218,28 @@ def test_nx_conversion():
    assert nxg.size() == 7
    _check_nx_feature(nxg, {'n1': n1}, {'e1': e1})
    # now test convert from networkx without id in edge feature
    # first pop id in edge feature
    for _, _, attr in nxg.edges(data=True):
        attr.pop('id')
    # test with a new graph
    g = DGLGraph(multigraph=True)
    g.from_networkx(nxg, node_attrs=['n1'], edge_attrs=['e1'])
    # check graph size
    assert g.number_of_nodes() == 7
    assert g.number_of_edges() == 7
    # check number of features
    assert len(g.ndata) == 1
    assert len(g.edata) == 1
    # check feature values
    assert U.allclose(g.ndata['n1'], n1)
    # edge feature order follows nxg.edges()
    edge_feat = []
    for _, _, attr in nxg.edges(data=True):
        edge_feat.append(attr['e1'].unsqueeze(0))
    edge_feat = th.cat(edge_feat, dim=0)
    assert U.allclose(g.edata['e1'], edge_feat)

def test_batch_send():
    g = generate_graph()
    def _fmsg(edges):
......