OpenDAS / dgl, commit a1038eb1

graph index

Authored Sep 19, 2018 by Minjie Wang
Parent: a81d27dc
Showing 6 changed files with 237 additions and 153 deletions (+237, -153).
python/dgl/backend/pytorch.py  (+18, -10)
python/dgl/graph.py            (+13, -22)
python/dgl/graph_index.py      (+67, -26)
python/dgl/ndarray.py          (+43, -4)
python/dgl/utils.py            (+50, -54)
src/graph/graph_apis.cc        (+46, -37)
python/dgl/backend/pytorch.py
@@ -34,6 +34,9 @@ def astype(a, ty):
 def asnumpy(a):
     return a.cpu().numpy()
 
+def from_numpy(np_data):
+    return th.from_numpy(np_data)
+
 def pack(tensors):
     return th.cat(tensors)
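
The new backend from_numpy wrapper is thin because torch.from_numpy already does the important part: it shares memory with the source array rather than copying it. A quick check of that behavior (a demo snippet assuming PyTorch and NumPy are installed; not part of the commit):

    import numpy as np
    import torch as th

    # torch.from_numpy returns a view over the numpy buffer, not a copy.
    a = np.zeros(3, dtype=np.int64)
    t = th.from_numpy(a)
    t[0] = 7
    assert a[0] == 7  # the write through the tensor is visible in numpy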
@@ -43,8 +46,8 @@ def unpack(x, indices_or_sections=1):
 def shape(x):
     return x.shape
 
-def isinteger(x):
-    return x.dtype in [th.int, th.int8, th.int16, th.int32, th.int64]
+def dtype(x):
+    return x.dtype
 
 unique = th.unique
@@ -107,11 +110,16 @@ def _typestr(arr_dtype):
     else:
         raise RuntimeError('Unsupported data type:', arr_dtype)
 
-def asdglarray(arr):
-    """The data is copied to the new array."""
-    assert arr.is_contiguous()
-    rst = nd.empty(tuple(arr.shape), _typestr(arr.dtype), get_context(arr))
-    data = ctypes.cast(arr.data_ptr(), ctypes.c_void_p)
-    nbytes = ctypes.c_size_t(arr.numel() * arr.element_size())
-    check_call(_LIB.TVMArrayCopyFromBytes(rst.handle, data, nbytes))
-    return rst
+def astvmarray(arr_data):
+    """Return a TVMArray representation of the underlying data."""
+    data = arr_data
+    assert data.is_contiguous()
+    arr = TVMArray()
+    shape = c_array(tvm_shape_index_t, tuple(data.shape))
+    arr.data = ctypes.cast(data.data_ptr(), ctypes.c_void_p)
+    arr.shape = shape
+    arr.strides = None
+    arr.dtype = TVMType(_typestr(data.dtype))
+    arr.ndim = len(shape)
+    arr.ctx = get_context(data)
+    return arr
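
Unlike the asdglarray it replaces, astvmarray copies nothing: it fills a TVMArray descriptor whose data field is the tensor's own data_ptr(), which is why the contiguity assert matters. A minimal sketch of that zero-copy idea (hypothetical demo code assuming PyTorch and NumPy, not DGL code):

    import ctypes
    import numpy as np
    import torch as th

    # Wrap a contiguous tensor's raw data pointer without copying: writes
    # through the wrapper are visible through the tensor.
    t = th.arange(4, dtype=th.int64)
    assert t.is_contiguous()
    ptr = ctypes.cast(ctypes.c_void_p(t.data_ptr()),
                      ctypes.POINTER(ctypes.c_int64))
    view = np.ctypeslib.as_array(ptr, shape=(t.numel(),))
    view[0] = -1
    assert t[0].item() == -1  # same buffer, zero copies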
python/dgl/graph.py
@@ -3,19 +3,17 @@
 from __future__ import absolute_import
 
 import networkx as nx
-from networkx.classes.digraph import DiGraph
 import dgl
 from .base import ALL, is_all, __MSG__, __REPR__
 from . import backend as F
 from .backend import Tensor
-from .cached_graph import CachedGraph, create_cached_graph
+from .graph_index import GraphIndex
 from .frame import FrameRef, merge_frames
-from .nx_adapt import nx_init
 from . import scheduler
 from . import utils
 
-class DGLGraph(DiGraph):
+class DGLGraph(object):
     """Base graph class specialized for neural networks on graphs.
 
     TODO(minjie): document of batching semantics
@@ -38,20 +36,20 @@ class DGLGraph(DiGraph):
                  edge_frame=None,
                  **attr):
         # TODO(minjie): maintaining node/edge list is costly when graph is large.
         self._edge_list = []
-        nx_init(self,
-                self._add_node_callback,
-                self._add_edge_callback,
-                self._del_node_callback,
-                self._del_edge_callback,
-                graph_data,
-                **attr)
-        # cached graph and storage
-        self._cached_graph = None
+        #nx_init(self,
+        #        self._add_node_callback,
+        #        self._add_edge_callback,
+        #        self._del_node_callback,
+        #        self._del_edge_callback,
+        #        graph_data,
+        #        **attr)
+        # graph
+        self._graph = GraphIndex(graph_data)
         # frame
         self._node_frame = node_frame if node_frame is not None else FrameRef()
         self._edge_frame = edge_frame if edge_frame is not None else FrameRef()
         # other class members
-        self._msg_graph = None
+        self._msg_graph = GraphIndex()
         self._msg_frame = FrameRef()
         self._message_func = (None, None)
         self._reduce_func = (None, None)
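
The structural shift in this hunk is from inheritance (DGLGraph used to be a networkx DiGraph) to composition (DGLGraph now owns a GraphIndex backed by the C API). A toy sketch of the delegation pattern, using plain networkx as a stand-in for the index (hypothetical illustration, not DGL code):

    import networkx as nx

    # Composition instead of inheritance: the graph object owns its
    # structural index and forwards queries to it explicitly.
    class ToyGraph(object):
        def __init__(self, graph_data=None):
            # normalize any networkx-compatible input once, up front
            self._graph = nx.DiGraph(graph_data)

        def number_of_nodes(self):
            return self._graph.number_of_nodes()

    g = ToyGraph([(0, 1), (1, 2)])
    assert g.number_of_nodes() == 3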
@@ -919,13 +917,6 @@ class DGLGraph(DiGraph):
         pos = graphviz_layout(self, prog='dot')
         nx.draw(self, pos, with_labels=True)
 
-    @property
-    def cached_graph(self):
-        # TODO: dirty flag when mutated
-        if self._cached_graph is None:
-            self._cached_graph = create_cached_graph(self)
-        return self._cached_graph
-
     @property
     def msg_graph(self):
         # TODO: dirty flag when mutated
python/dgl/cgraph.py → python/dgl/graph_index.py (renamed)
 from __future__ import absolute_import
 
+import numpy as np
+import networkx as nx
+
 from ._ffi.function import _init_api
 from . import backend as F
+from . import utils
@@ -9,12 +12,15 @@ class GraphIndex(object):
     Parameters
     ----------
-    graph_data : graph data
+    graph_data : graph data, optional
         Data to initialize graph. Same as networkx's semantics.
     """
     def __init__(self, graph_data=None):
-        # TODO: convert from graph data
         self._handle = _CAPI_DGLGraphCreate()
+        if isinstance(graph_data, nx.DiGraph):
+            self.from_networkx(graph_data)
+        elif graph_data is not None:
+            self.from_networkx(nx.DiGraph(graph_data))
 
     def __del__(self):
         """Free this graph index object."""
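
The constructor now dispatches on the input type. A quick check of what the two branches accept, using plain networkx as a hypothetical stand-in (not part of the commit):

    import networkx as nx

    # Branch 1: already a DiGraph, imported as-is.
    g1 = nx.DiGraph()
    g1.add_edge(0, 1)

    # Branch 2: any other networkx-compatible graph data is first
    # normalized via nx.DiGraph(...), e.g. an edge list.
    g2 = nx.DiGraph([(0, 1)])

    assert list(g1.edges) == list(g2.edges)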
@@ -52,10 +58,6 @@ class GraphIndex(object):
         v : utils.Index
             The dst nodes.
         """
-        #u = utils.Index(u)
-        #v = utils.Index(v)
-        #u_array = F.asdglarray(u.totensor())
-        #v_array = F.asdglarray(v.totensor())
         u_array = u.todgltensor()
         v_array = v.todgltensor()
         _CAPI_DGLGraphAddEdges(self._handle, u_array, v_array)
@@ -113,7 +115,7 @@ class GraphIndex(object):
             0-1 array indicating existence
         """
         vid_array = vids.todgltensor()
-        return utils.Index(_CAPI_DGLGraphHasVertices(self._handle, vid_array))
+        return utils.toindex(_CAPI_DGLGraphHasVertices(self._handle, vid_array))
 
     def has_edge(self, u, v):
         """Return true if the edge exists.
@@ -149,7 +151,7 @@ class GraphIndex(object):
         """
         u_array = u.todgltensor()
         v_array = v.todgltensor()
-        return utils.Index(_CAPI_DGLGraphHasEdges(self._handle, u_array, v_array))
+        return utils.toindex(_CAPI_DGLGraphHasEdges(self._handle, u_array, v_array))
 
     def predecessors(self, v, radius=1):
         """Return the predecessors of the node.
@@ -166,7 +168,7 @@ class GraphIndex(object):
         utils.Index
             Array of predecessors
         """
-        return utils.Index(_CAPI_DGLGraphPredecessors(self._handle, v, radius))
+        return utils.toindex(_CAPI_DGLGraphPredecessors(self._handle, v, radius))
 
     def successors(self, v, radius=1):
         """Return the successors of the node.
@@ -183,7 +185,7 @@ class GraphIndex(object):
         utils.Index
             Array of successors
         """
-        return utils.Index(_CAPI_DGLGraphSuccessors(self._handle, v, radius))
+        return utils.toindex(_CAPI_DGLGraphSuccessors(self._handle, v, radius))
 
     def edge_id(self, u, v):
         """Return the id of the edge.
@@ -219,7 +221,7 @@ class GraphIndex(object):
         """
         u_array = u.todgltensor()
         v_array = v.todgltensor()
-        return utils.Index(_CAPI_DGLGraphEdgeIds(self._handle, u_array, v_array))
+        return utils.toindex(_CAPI_DGLGraphEdgeIds(self._handle, u_array, v_array))
 
     def in_edges(self, v):
         """Return the in edges of the node(s).
@@ -243,9 +245,9 @@ class GraphIndex(object):
         else:
             v_array = v.todgltensor()
             edge_array = _CAPI_DGLGraphInEdges_2(self._handle, v_array)
-        src = utils.Index(edge_array(0))
-        dst = utils.Index(edge_array(1))
-        eid = utils.Index(edge_array(2))
+        src = utils.toindex(edge_array(0))
+        dst = utils.toindex(edge_array(1))
+        eid = utils.toindex(edge_array(2))
         return src, dst, eid
 
     def out_edges(self, v):
@@ -270,9 +272,9 @@ class GraphIndex(object):
         else:
             v_array = v.todgltensor()
             edge_array = _CAPI_DGLGraphOutEdges_2(self._handle, v_array)
-        src = utils.Index(edge_array(0))
-        dst = utils.Index(edge_array(1))
-        eid = utils.Index(edge_array(2))
+        src = utils.toindex(edge_array(0))
+        dst = utils.toindex(edge_array(1))
+        eid = utils.toindex(edge_array(2))
         return src, dst, eid
 
     def edges(self, sorted=False):
@@ -293,9 +295,9 @@ class GraphIndex(object):
             The edge ids.
         """
         edge_array = _CAPI_DGLGraphEdges(self._handle, sorted)
-        src = edge_array(0)
-        dst = edge_array(1)
-        eid = edge_array(2)
+        src = utils.toindex(edge_array(0))
+        dst = utils.toindex(edge_array(1))
+        eid = utils.toindex(edge_array(2))
         return src, dst, eid
 
     def in_degree(self, v):
@@ -327,7 +329,7 @@ class GraphIndex(object):
             The in degree array.
         """
         v_array = v.todgltensor()
-        return utils.Index(_CAPI_DGLGraphInDegrees(self._handle, v_array))
+        return utils.toindex(_CAPI_DGLGraphInDegrees(self._handle, v_array))
 
     def out_degree(self, v):
         """Return the out degree of the node.
@@ -358,17 +360,56 @@ class GraphIndex(object):
             The out degree array.
         """
         v_array = v.todgltensor()
-        return utils.Index(_CAPI_DGLGraphOutDegrees(self._handle, v_array))
+        return utils.toindex(_CAPI_DGLGraphOutDegrees(self._handle, v_array))
 
-    def asnetworkx(self):
+    def to_networkx(self):
         """Convert to networkx graph.
 
         The edge id will be saved as the 'id' edge attribute.
 
         Returns
         -------
         networkx.DiGraph
             The nx graph
         """
-        # TODO
-        return None
+        src, dst, eid = self.edges()
+        ret = nx.DiGraph()
+        for u, v, id in zip(src, dst, eid):
+            ret.add_edge(u, v, id=id)
+        return ret
+
+    def from_networkx(self, nx_graph):
+        """Convert from networkx graph.
+
+        If the 'id' edge attribute exists, edges are added in edge id
+        order; otherwise the order is undefined.
+
+        Parameters
+        ----------
+        nx_graph : networkx.DiGraph
+            The nx graph
+        """
+        self.clear()
+        num_nodes = nx_graph.number_of_nodes()
+        self.add_nodes(num_nodes)
+        has_edge_id = 'id' in next(iter(nx_graph.edges))
+        if has_edge_id:
+            num_edges = nx_graph.number_of_edges()
+            src = np.zeros((num_edges,), dtype=np.int64)
+            dst = np.zeros((num_edges,), dtype=np.int64)
+            for e, attr in nx_graph.edges.items():
+                u, v = e
+                eid = attr['id']
+                src[eid] = u
+                dst[eid] = v
+        else:
+            src = []
+            dst = []
+            for u, v in nx_graph.edges:
+                src.append(u)
+                dst.append(v)
+        src = utils.toindex(src)
+        dst = utils.toindex(dst)
+        self.add_edges(src, dst)
 
-_init_api("dgl.graph")
+_init_api("dgl.graph_index")
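
The 'id' attribute convention is what makes edge order reproducible across the round trip. Note that the committed check, 'id' in next(iter(nx_graph.edges)), tests membership in the first edge's (u, v) tuple rather than in its attribute dict, so the id-ordered branch may not trigger as intended; the standalone sketch below (plain networkx, not DGL code) reads the attribute dict instead:

    import networkx as nx

    # Store each edge's id as an attribute so a later import can
    # re-add edges in their original id order, even though networkx
    # iteration order may differ.
    g = nx.DiGraph()
    for eid, (u, v) in enumerate([(0, 1), (1, 2), (2, 0)]):
        g.add_edge(u, v, id=eid)

    num_edges = g.number_of_edges()
    src = [None] * num_edges
    dst = [None] * num_edges
    for (u, v), attr in g.edges.items():  # ((u, v), data-dict) pairs
        src[attr['id']] = u
        dst[attr['id']] = v
    assert (src, dst) == ([0, 1, 2], [1, 2, 0])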
python/dgl/ndarray.py
"""DGL Runtime NDArray API.
dgl.ndarray provides a minimum runtime array
API to unify
different array libraries used as backend
.
dgl.ndarray provides a minimum runtime array
structure to be
used with C++ library
.
"""
# pylint: disable=invalid-name,unused-import
from
__future__
import
absolute_import
as
_abs
import
ctypes
import
operator
import
numpy
as
_np
from
._ffi.ndarray
import
TVMContext
,
TVMType
,
NDArrayBase
from
._ffi.ndarray
import
context
,
empty
,
from_dlpack
from
._ffi.ndarray
import
context
,
empty
,
from_dlpack
,
numpyasarray
from
._ffi.ndarray
import
_set_class_ndarray
from
.
import
backend
as
F
class
NDArray
(
NDArrayBase
):
"""Lightweight NDArray class for DGL framework."""
pass
def
__len__
(
self
):
return
reduce
(
operator
.
mul
,
self
.
shape
,
1
)
def
cpu
(
dev_id
=
0
):
"""Construct a CPU device
...
...
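
The new __len__ is the element count: the product of all dimensions, seeded with 1. Note that on Python 3, reduce must be imported from functools (it is no longer a builtin). A quick worked check (demo snippet, not part of the commit):

    import operator
    from functools import reduce  # Python 3: reduce lives in functools

    # Element count as NDArray.__len__ computes it: the product of all
    # dimensions, with 1 as the seed so a 0-d shape yields 1.
    shape = (2, 3, 4)
    assert reduce(operator.mul, shape, 1) == 24
    assert reduce(operator.mul, (), 1) == 1  # scalar: one element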
@@ -66,4 +71,38 @@ def array(arr, ctx=cpu(0)):
         arr = _np.array(arr)
     return empty(arr.shape, arr.dtype, ctx).copyfrom(arr)
 
+def from_numpy(np_data):
+    """Create an array that shares the given numpy data.
+
+    Parameters
+    ----------
+    np_data : numpy.ndarray
+        The numpy data
+
+    Returns
+    -------
+    NDArray
+        The array
+    """
+    arr, _ = numpyasarray(np_data)
+    handle = ctypes.pointer(arr)
+    return NDArray(handle, is_view=True)
+
+def from_user_tensor(data):
+    """Create an array that shares the given user tensor data.
+
+    Parameters
+    ----------
+    data : F.Tensor
+        The user tensor data.
+
+    Returns
+    -------
+    NDArray
+        The array
+    """
+    arr = F.astvmarray(data)
+    handle = ctypes.pointer(arr)
+    return NDArray(handle, is_view=True)
 
 _set_class_ndarray(NDArray)
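
Both constructors pass is_view=True: the NDArray borrows the buffer rather than owning it, so the producing object must outlive every use of the view. A toy illustration of that contract (plain NumPy, not DGL code):

    import numpy as np

    # A borrowed view neither copies nor owns the buffer: writes through
    # the owner are visible through the view, and the owner must stay
    # alive while the view is in use.
    owner = np.arange(3, dtype=np.int64)
    view = owner.view()  # stand-in for an is_view NDArray
    owner[1] = 10
    assert view[1] == 10  # shared memory, no copy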
python/dgl/utils.py
@@ -7,48 +7,68 @@ import numpy as np
 from . import backend as F
 from .backend import Tensor, SparseTensor
-
-def is_id_tensor(u):
-    """Return whether the input is a supported id tensor."""
-    return isinstance(u, Tensor) and F.isinteger(u) and len(F.shape(u)) == 1
-
-def is_id_container(u):
-    """Return whether the input is a supported id container."""
-    return (getattr(u, '__iter__', None) is not None
-            and getattr(u, '__len__', None) is not None)
+from . import ndarray as nd
 
 class Index(object):
     """Index class that can be easily converted to list/tensor."""
     def __init__(self, data):
-        self._list_data = None
-        self._tensor_data = None
-        self._ctx_data = dict()
+        self._list_data = None           # a numpy type data
+        self._user_tensor_data = dict()  # dictionary of user tensors
+        self._dgl_tensor_data = None     # a dgl ndarray
         self._dispatch(data)
 
     def _dispatch(self, data):
-        if is_id_tensor(data):
-            self._tensor_data = data
-        elif is_id_container(data):
-            self._list_data = data
+        """Store data based on its type."""
+        if isinstance(data, Tensor):
+            if not (F.dtype(data) == F.int64 and len(F.shape(data)) == 1):
+                raise ValueError('Index data must be 1D int64 vector, but got: %s' % str(data))
+            self._user_tensor_data[F.get_context(data)] = data
+        elif isinstance(data, nd.NDArray):
+            if not (data.dtype == 'int64' and len(data.shape) == 1):
+                raise ValueError('Index data must be 1D int64 vector, but got: %s' % str(data))
+            self._dgl_tensor_data = data
         else:
             try:
-                self._list_data = [int(data)]
+                self._list_data = np.array([int(data)]).astype(np.int64)
             except:
                 try:
                     self._list_data = np.array(data).astype(np.int64)
                 except:
-                    raise TypeError('Error index data: %s' % str(x))
+                    raise ValueError('Error index data: %s' % str(data))
 
     def tolist(self):
         """Convert to a python-list compatible object."""
         if self._list_data is None:
-            self._list_data = list(F.asnumpy(self._tensor_data))
+            if self._dgl_tensor_data is not None:
+                self._list_data = self._dgl_tensor_data.asnumpy()
+            else:
+                assert len(self._user_tensor_data) > 0
+                data = next(iter(self._user_tensor_data.values()))
+                self._list_data = F.asnumpy(data)
         return self._list_data
 
-    def totensor(self, ctx=None):
-        if self._tensor_data is None:
-            self._tensor_data = F.tensor(self._list_data, dtype=F.int64)
+    def tousertensor(self, ctx=None):
+        """Convert to user tensor (defined in `backend`)."""
+        if len(self._user_tensor_data) == 0:
+            self._user_tensor_data[nd.cpu()] = F.from_numpy(self.tolist())
         if ctx is None:
-            return self._tensor_data
-        if ctx not in self._ctx_data:
-            self._ctx_data[ctx] = F.to_context(self._tensor_data, ctx)
-        return self._ctx_data[ctx]
+            ctx = nd.cpu()
+        if ctx not in self._user_tensor_data:
+            data = next(iter(self._user_tensor_data.values()))
+            self._user_tensor_data[ctx] = F.to_context(data, ctx)
+        return self._user_tensor_data[ctx]
+
+    def todgltensor(self):
+        """Convert to dgl.NDArray."""
+        if self._dgl_tensor_data is None:
+            if self._list_data is not None:
+                # create a view ndarray from numpy
+                self._dgl_tensor_data = nd.from_numpy(self._list_data)
+            else:
+                # create a view ndarray from user tensor
+                self._dgl_tensor_data = nd.from_user_tensor(self.tousertensor(ctx=nd.cpu()))
+        return self._dgl_tensor_data
 
     def __iter__(self):
         return iter(self.tolist())
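
The rewritten Index keeps one logical vector in up to three physical forms (numpy data, per-context user tensors, a DGL ndarray) and converts between them lazily, caching each conversion. A stripped-down sketch of that caching discipline (toy code, not DGL's API):

    import numpy as np

    # One canonical buffer; other representations are materialized on
    # first request and then reused.
    class ToyIndex(object):
        def __init__(self, data):
            self._np_data = np.asarray(data, dtype=np.int64)
            assert self._np_data.ndim == 1, 'must be a 1D int64 vector'
            self._list_data = None  # lazy python-list form

        def tolist(self):
            if self._list_data is None:           # convert once...
                self._list_data = self._np_data.tolist()
            return self._list_data                # ...then serve the cache

    idx = ToyIndex([3, 1, 2])
    assert idx.tolist() is idx.tolist()  # second call hits the cache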
@@ -56,8 +76,11 @@ class Index(object):
     def __len__(self):
         if self._list_data is not None:
             return len(self._list_data)
+        elif len(self._user_tensor_data) > 0:
+            data = next(iter(self._user_tensor_data.values()))
+            return len(data)
         else:
-            return len(self._tensor_data)
+            return len(self._dgl_tensor_data)
 
     def __getitem__(self, i):
         return self.tolist()[i]
@@ -125,33 +148,6 @@ def edge_broadcasting(u, v):
     assert len(u) == len(v)
     return u, v
 
-'''
-def convert_to_id_container(x):
-    if is_id_container(x):
-        return x
-    elif is_id_tensor(x):
-        return F.asnumpy(x)
-    else:
-        try:
-            return [int(x)]
-        except:
-            raise TypeError('Error node: %s' % str(x))
-    return None
-
-def convert_to_id_tensor(x, ctx=None):
-    if is_id_container(x):
-        ret = F.tensor(x, dtype=F.int64)
-    elif is_id_tensor(x):
-        ret = x
-    else:
-        try:
-            ret = F.tensor([int(x)], dtype=F.int64)
-        except:
-            raise TypeError('Error node: %s' % str(x))
-    ret = F.to_context(ret, ctx)
-    return ret
-'''
 
 class LazyDict(Mapping):
     """A readonly dictionary that does not materialize the storage."""
     def __init__(self, fn, keys):
src/graph/graph_apis.cc
@@ -9,7 +9,7 @@ using tvm::runtime::PackedFunc;
 namespace dgl {
 namespace {
 /*!\brief Convert EdgeArray structure to PackedFunc */
 PackedFunc ConvertEdgeArrayToPackedFunc(const Graph::EdgeArray& ea) {
   auto body = [ea] (TVMArgs args, TVMRetValue* rv) {
       int which = args[0];
@@ -26,25 +26,34 @@ PackedFunc ConvertEdgeArrayToPackedFunc(const Graph::EdgeArray& ea) {
return
PackedFunc
(
body
);
}
DLManagedTensor
*
CreateTmpDLManagedTensor
(
const
TVMArgValue
&
arg
)
{
const
DLTensor
*
dl_tensor
=
arg
;
DLManagedTensor
*
ret
=
new
DLManagedTensor
();
ret
->
deleter
=
[]
(
DLManagedTensor
*
self
)
{
delete
self
;
};
ret
->
manager_ctx
=
nullptr
;
ret
->
dl_tensor
=
*
dl_tensor
;
return
ret
;
}
}
// namespace
// Graph handler type
typedef
void
*
GraphHandle
;
TVM_REGISTER_GLOBAL
(
"graph._CAPI_DGLGraphCreate"
)
TVM_REGISTER_GLOBAL
(
"graph
_index
._CAPI_DGLGraphCreate"
)
.
set_body
([]
(
TVMArgs
args
,
TVMRetValue
*
rv
)
{
GraphHandle
ghandle
=
new
Graph
();
*
rv
=
ghandle
;
});
TVM_REGISTER_GLOBAL
(
"graph._CAPI_DGLGraphFree"
)
TVM_REGISTER_GLOBAL
(
"graph
_index
._CAPI_DGLGraphFree"
)
.
set_body
([]
(
TVMArgs
args
,
TVMRetValue
*
rv
)
{
GraphHandle
ghandle
=
args
[
0
];
Graph
*
gptr
=
static_cast
<
Graph
*>
(
ghandle
);
delete
gptr
;
});
TVM_REGISTER_GLOBAL
(
"graph._CAPI_DGLGraphAddVertices"
)
TVM_REGISTER_GLOBAL
(
"graph
_index
._CAPI_DGLGraphAddVertices"
)
.
set_body
([]
(
TVMArgs
args
,
TVMRetValue
*
rv
)
{
GraphHandle
ghandle
=
args
[
0
];
Graph
*
gptr
=
static_cast
<
Graph
*>
(
ghandle
);
...
...
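
CreateTmpDLManagedTensor wraps the incoming DLTensor in a heap-allocated DLManagedTensor whose deleter frees only the wrapper, never the underlying buffer; ownership of the data stays with the Python-side producer. The same zero-copy handoff can be observed from Python with NumPy's DLPack support (a sketch assuming NumPy >= 1.22, not part of the commit):

    import numpy as np

    # DLPack hands over a view, not a copy: the importer wraps the
    # exporter's buffer, mirroring IdArray::FromDLPack on the C++ side.
    a = np.arange(4, dtype=np.int64)
    b = np.from_dlpack(a)           # requires NumPy >= 1.22
    assert np.shares_memory(a, b)   # one buffer, two wrappers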
@@ -52,7 +61,7 @@ TVM_REGISTER_GLOBAL("graph._CAPI_DGLGraphAddVertices")
gptr
->
AddVertices
(
num_vertices
);
});
TVM_REGISTER_GLOBAL
(
"graph._CAPI_DGLGraphAddEdge"
)
TVM_REGISTER_GLOBAL
(
"graph
_index
._CAPI_DGLGraphAddEdge"
)
.
set_body
([]
(
TVMArgs
args
,
TVMRetValue
*
rv
)
{
GraphHandle
ghandle
=
args
[
0
];
Graph
*
gptr
=
static_cast
<
Graph
*>
(
ghandle
);
...
...
@@ -61,37 +70,37 @@ TVM_REGISTER_GLOBAL("graph._CAPI_DGLGraphAddEdge")
gptr
->
AddEdge
(
src
,
dst
);
});
TVM_REGISTER_GLOBAL
(
"graph._CAPI_DGLGraphAddEdges"
)
TVM_REGISTER_GLOBAL
(
"graph
_index
._CAPI_DGLGraphAddEdges"
)
.
set_body
([]
(
TVMArgs
args
,
TVMRetValue
*
rv
)
{
GraphHandle
ghandle
=
args
[
0
];
Graph
*
gptr
=
static_cast
<
Graph
*>
(
ghandle
);
const
IdArray
src
=
args
[
1
];
const
IdArray
dst
=
args
[
2
];
const
IdArray
src
=
IdArray
::
FromDLPack
(
CreateTmpDLManagedTensor
(
args
[
1
]
))
;
const
IdArray
dst
=
IdArray
::
FromDLPack
(
CreateTmpDLManagedTensor
(
args
[
2
]
))
;
gptr
->
AddEdges
(
src
,
dst
);
});
The remaining hunks repeat the same two mechanical changes across every registered global.

First, each function is re-registered under the "graph_index." prefix in place of "graph.":

-TVM_REGISTER_GLOBAL("graph._CAPI_DGLGraphClear")
+TVM_REGISTER_GLOBAL("graph_index._CAPI_DGLGraphClear")

and likewise for _CAPI_DGLGraphNumVertices, _CAPI_DGLGraphNumEdges, _CAPI_DGLGraphHasVertex, _CAPI_DGLGraphHasVertices, _CAPI_DGLGraphHasEdge, _CAPI_DGLGraphHasEdges, _CAPI_DGLGraphPredecessors, _CAPI_DGLGraphSuccessors, _CAPI_DGLGraphEdgeId, _CAPI_DGLGraphEdgeIds, _CAPI_DGLGraphInEdges_1, _CAPI_DGLGraphInEdges_2, _CAPI_DGLGraphOutEdges_1, _CAPI_DGLGraphOutEdges_2, _CAPI_DGLGraphEdges, _CAPI_DGLGraphInDegree, _CAPI_DGLGraphInDegrees, _CAPI_DGLGraphOutDegree, and _CAPI_DGLGraphOutDegrees. The handler bodies (GraphHandle ghandle = args[0]; Graph* gptr = static_cast<Graph*>(ghandle); ...) are otherwise unchanged.

Second, every body that takes IdArray arguments (_CAPI_DGLGraphHasVertices, _CAPI_DGLGraphHasEdges, _CAPI_DGLGraphEdgeIds, _CAPI_DGLGraphInEdges_2, _CAPI_DGLGraphOutEdges_2, _CAPI_DGLGraphInDegrees, _CAPI_DGLGraphOutDegrees) replaces the implicit TVMArgValue conversion with an explicit DLPack import:

-    const IdArray vids = args[1];
+    const IdArray vids = IdArray::FromDLPack(CreateTmpDLManagedTensor(args[1]));

with the same substitution applied to the src/dst pairs (args[1] and args[2]) in _CAPI_DGLGraphHasEdges and _CAPI_DGLGraphEdgeIds.