Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
3683a774
Commit
3683a774
authored
Oct 18, 2018
by
Gan Quan
Browse files
Merge branch 'cpp' of github.com:jermainewang/dgl into cpp
parents
f1ede61f
c9e3c658
Changes
45
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
80 additions
and
128 deletions
+80
-128
tests/pytorch/test_function.py
tests/pytorch/test_function.py
+0
-86
tests/pytorch/test_line_graph.py
tests/pytorch/test_line_graph.py
+5
-9
tests/pytorch/test_specialization.py
tests/pytorch/test_specialization.py
+34
-33
tests/scripts/task_lint.sh
tests/scripts/task_lint.sh
+5
-0
tests/scripts/test_examples.sh
tests/scripts/test_examples.sh
+36
-0
No files found.
tests/pytorch/test_function.py
View file @
3683a774
import
torch
as
th
import
dgl
import
dgl.function
as
fn
from
dgl.graph
import
__REPR__
def
generate_graph
():
g
=
dgl
.
DGLGraph
()
...
...
@@ -37,18 +36,9 @@ def generate_graph1():
g
.
set_e_repr
(
h
)
return
g
def reducer_msg(node, msgs):
    """Reduce incoming messages by summing the 'm' field over the message axis.

    Expects ``msgs`` to be a mapping with a tensor under key 'm' of shape
    (nodes, num_msgs, ...); returns the per-node sum over dim 1 as a bare tensor.
    """
    message_field = msgs['m']
    return message_field.sum(dim=1)
def reducer_out(node, msgs):
    """Reduce anonymous (tensor-valued) messages, storing the sum under 'h'.

    Here ``msgs`` is a raw tensor of shape (nodes, num_msgs, ...); the result
    is a dict with the per-node sum over dim 1 keyed by 'h'.
    """
    summed = msgs.sum(dim=1)
    return {'h': summed}
def reducer_both(node, msgs):
    """Reduce the named 'm' message field, storing the sum under 'h'.

    ``msgs`` is a mapping holding a tensor under 'm'; returns a dict whose
    'h' entry is that tensor summed over the message axis (dim 1).
    """
    total = msgs['m'].sum(dim=1)
    return {'h': total}
def reducer_none(node, msgs):
    """Reduce anonymous messages to a bare tensor (no field name).

    ``msgs`` is a raw tensor of shape (nodes, num_msgs, ...); the per-node
    sum over dim 1 is returned directly rather than wrapped in a dict.
    """
    return msgs.sum(dim=1)
def
test_copy_src
():
# copy_src with both fields
g
=
generate_graph
()
...
...
@@ -58,30 +48,6 @@ def test_copy_src():
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy_src with only src field; the out field should use anonymous repr
g
=
generate_graph
()
g
.
register_message_func
(
fn
.
copy_src
(
src
=
'h'
))
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy_src with no src field; should use anonymous repr
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
copy_src
(
out
=
'm'
))
g
.
register_reduce_func
(
reducer_both
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy src with no fields;
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
copy_src
())
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
def
test_copy_edge
():
# copy_edge with both fields
g
=
generate_graph
()
...
...
@@ -91,30 +57,6 @@ def test_copy_edge():
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy_edge with only edge field; the out field should use anonymous repr
g
=
generate_graph
()
g
.
register_message_func
(
fn
.
copy_edge
(
edge
=
'h'
))
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy_edge with no edge field; should use anonymous repr
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
copy_edge
(
out
=
'm'
))
g
.
register_reduce_func
(
reducer_both
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
# copy edge with no fields;
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
copy_edge
())
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
10.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
44.
]))
def
test_src_mul_edge
():
# src_mul_edge with all fields
g
=
generate_graph
()
...
...
@@ -124,34 +66,6 @@ def test_src_mul_edge():
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
100.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
284.
]))
g
=
generate_graph
()
g
.
register_message_func
(
fn
.
src_mul_edge
(
src
=
'h'
,
edge
=
'h'
))
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
100.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
284.
]))
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
src_mul_edge
(
out
=
'm'
))
g
.
register_reduce_func
(
reducer_both
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
100.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
284.
]))
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
src_mul_edge
())
g
.
register_reduce_func
(
reducer_out
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
()[
'h'
],
th
.
tensor
([
100.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
284.
]))
g
=
generate_graph1
()
g
.
register_message_func
(
fn
.
src_mul_edge
())
g
.
register_reduce_func
(
reducer_none
)
g
.
update_all
()
assert
th
.
allclose
(
g
.
get_n_repr
(),
th
.
tensor
([
100.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
1.
,
284.
]))
if
__name__
==
'__main__'
:
test_copy_src
()
test_copy_edge
()
...
...
tests/pytorch/test_line_graph.py
View file @
3683a774
...
...
@@ -5,35 +5,31 @@ import dgl
D
=
5
def check_eq(a, b):
    """Return True iff tensors *a* and *b* share a shape and are numerically close.

    Shape is compared first so mismatched tensors short-circuit to False
    before any value comparison is attempted.
    """
    if a.shape != b.shape:
        return False
    return np.allclose(a.numpy(), b.numpy())
def
test_line_graph
():
N
=
5
G
=
dgl
.
DGLGraph
(
nx
.
star_graph
(
N
))
G
.
set_e_repr
(
th
.
randn
((
2
*
N
,
D
)))
G
.
set_e_repr
(
{
'h'
:
th
.
randn
((
2
*
N
,
D
))
}
)
n_edges
=
G
.
number_of_edges
()
L
=
G
.
line_graph
(
shared
=
True
)
assert
L
.
number_of_nodes
()
==
2
*
N
L
.
set_n_repr
(
th
.
randn
((
2
*
N
,
D
)))
L
.
set_n_repr
(
{
'h'
:
th
.
randn
((
2
*
N
,
D
))
}
)
# update node features on line graph should reflect to edge features on
# original graph.
u
=
[
0
,
0
,
2
,
3
]
v
=
[
1
,
2
,
0
,
0
]
eid
=
G
.
edge_ids
(
u
,
v
)
L
.
set_n_repr
(
th
.
zeros
((
4
,
D
)),
eid
)
assert
check_eq
(
G
.
get_e_repr
(
u
,
v
),
th
.
zeros
((
4
,
D
)))
L
.
set_n_repr
(
{
'h'
:
th
.
zeros
((
4
,
D
))
}
,
eid
)
assert
th
.
allclose
(
G
.
get_e_repr
(
u
,
v
)
[
'h'
]
,
th
.
zeros
((
4
,
D
)))
# adding a new node feature on line graph should also reflect to a new
# edge feature on original graph
data
=
th
.
randn
(
n_edges
,
D
)
L
.
set_n_repr
({
'w'
:
data
})
assert
check_eq
(
G
.
get_e_repr
()[
'w'
],
data
)
assert
th
.
allclose
(
G
.
get_e_repr
()[
'w'
],
data
)
def
test_no_backtracking
():
N
=
5
G
=
dgl
.
DGLGraph
(
nx
.
star_graph
(
N
))
G
.
set_e_repr
(
th
.
randn
((
2
*
N
,
D
)))
L
=
G
.
line_graph
(
backtracking
=
False
)
assert
L
.
number_of_nodes
()
==
2
*
N
for
i
in
range
(
1
,
N
):
...
...
tests/pytorch/test_specialization.py
View file @
3683a774
...
...
@@ -22,23 +22,23 @@ def generate_graph():
def
test_update_all
():
def
_test
(
fld
):
def
message_func
(
hu
,
edge
):
return
hu
[
fld
]
return
{
'm'
:
hu
[
fld
]
}
def
message_func_edge
(
hu
,
edge
):
if
len
(
hu
[
fld
].
shape
)
==
1
:
return
hu
[
fld
]
*
edge
[
'e1'
]
return
{
'm'
:
hu
[
fld
]
*
edge
[
'e1'
]
}
else
:
return
hu
[
fld
]
*
edge
[
'e2'
]
return
{
'm'
:
hu
[
fld
]
*
edge
[
'e2'
]
}
def
reduce_func
(
hv
,
msgs
):
return
{
fld
:
th
.
sum
(
msgs
,
1
)}
return
{
fld
:
th
.
sum
(
msgs
[
'm'
]
,
1
)}
def
apply_func
(
hu
):
return
{
fld
:
2
*
hu
[
fld
]}
g
=
generate_graph
()
# update all
v1
=
g
.
get_n_repr
()[
fld
]
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v2
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
update_all
(
message_func
,
reduce_func
,
apply_func
)
...
...
@@ -46,12 +46,12 @@ def test_update_all():
assert
th
.
allclose
(
v2
,
v3
)
# update all with edge weights
v1
=
g
.
get_n_repr
()[
fld
]
g
.
update_all
(
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
update_all
(
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v2
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
update_all
(
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
update_all
(
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v3
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
update_all
(
message_func_edge
,
reduce_func
,
apply_func
)
...
...
@@ -68,42 +68,40 @@ def test_send_and_recv():
v
=
th
.
tensor
([
1
,
2
,
3
,
9
,
9
,
0
])
def
_test
(
fld
):
def
message_func
(
hu
,
edge
):
return
hu
[
fld
]
return
{
'm'
:
hu
[
fld
]
}
def
message_func_edge
(
hu
,
edge
):
if
len
(
hu
[
fld
].
shape
)
==
1
:
return
hu
[
fld
]
*
edge
[
'e1'
]
return
{
'm'
:
hu
[
fld
]
*
edge
[
'e1'
]
}
else
:
return
hu
[
fld
]
*
edge
[
'e2'
]
return
{
'm'
:
hu
[
fld
]
*
edge
[
'e2'
]
}
def
reduce_func
(
hv
,
msgs
):
return
{
fld
:
th
.
sum
(
msgs
,
1
)}
return
{
fld
:
th
.
sum
(
msgs
[
'm'
]
,
1
)}
def
apply_func
(
hu
):
return
{
fld
:
2
*
hu
[
fld
]}
g
=
generate_graph
()
# send and recv
v1
=
g
.
get_n_repr
()[
fld
]
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v2
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
send_and_recv
(
u
,
v
,
message_func
,
reduce_func
,
apply_func
)
g
.
send_and_recv
(
u
,
v
,
message_func
,
reduce_func
,
apply_func
)
v3
=
g
.
get_n_repr
()[
fld
]
assert
th
.
allclose
(
v2
,
v3
)
# send and recv with edge weights
v1
=
g
.
get_n_repr
()[
fld
]
g
.
send_and_recv
(
u
,
v
,
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
send_and_recv
(
u
,
v
,
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v2
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
send_and_recv
(
u
,
v
,
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
),
fn
.
sum
(
out
=
fld
),
apply_func
)
g
.
send_and_recv
(
u
,
v
,
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
fld
),
apply_func
)
v3
=
g
.
get_n_repr
()[
fld
]
g
.
set_n_repr
({
fld
:
v1
})
g
.
send_and_recv
(
u
,
v
,
message_func_edge
,
reduce_func
,
apply_func
)
g
.
send_and_recv
(
u
,
v
,
message_func_edge
,
reduce_func
,
apply_func
)
v4
=
g
.
get_n_repr
()[
fld
]
assert
th
.
allclose
(
v2
,
v3
)
assert
th
.
allclose
(
v3
,
v4
)
...
...
@@ -127,19 +125,19 @@ def test_update_all_multi_fn():
fld
=
'f2'
# update all, mix of builtin and UDF
g
.
update_all
([
fn
.
copy_src
(
src
=
fld
,
out
=
'm1'
),
message_func
],
[
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v1'
),
reduce_func
],
[
fn
.
sum
(
msg
=
'm1'
,
out
=
'v1'
),
reduce_func
],
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
v2
=
g
.
get_n_repr
()[
'v2'
]
assert
th
.
allclose
(
v1
,
v2
)
# run builtin with single message and reduce
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
),
fn
.
sum
(
out
=
'v1'
),
None
)
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
'v1'
),
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
assert
th
.
allclose
(
v1
,
v2
)
# 1 message, 2 reduces
, using anonymous repr
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
),
[
fn
.
sum
(
out
=
'v2'
),
fn
.
sum
(
out
=
'v3'
)],
None
)
# 1 message, 2 reduces
g
.
update_all
(
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
[
fn
.
sum
(
msg
=
'm'
,
out
=
'v2'
),
fn
.
sum
(
msg
=
'm'
,
out
=
'v3'
)],
None
)
v2
=
g
.
get_n_repr
()[
'v2'
]
v3
=
g
.
get_n_repr
()[
'v3'
]
assert
th
.
allclose
(
v1
,
v2
)
...
...
@@ -147,7 +145,7 @@ def test_update_all_multi_fn():
# update all with edge weights, 2 message, 3 reduces
g
.
update_all
([
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
,
out
=
'm1'
),
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
,
out
=
'm2'
)],
[
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v1'
),
fn
.
sum
(
msg
s
=
'm2'
,
out
=
'v2'
),
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v3'
)],
[
fn
.
sum
(
msg
=
'm1'
,
out
=
'v1'
),
fn
.
sum
(
msg
=
'm2'
,
out
=
'v2'
),
fn
.
sum
(
msg
=
'm1'
,
out
=
'v3'
)],
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
v2
=
g
.
get_n_repr
()[
'v2'
]
...
...
@@ -181,20 +179,23 @@ def test_send_and_recv_multi_fn():
# send and recv, mix of builtin and UDF
g
.
send_and_recv
(
u
,
v
,
[
fn
.
copy_src
(
src
=
fld
,
out
=
'm1'
),
message_func
],
[
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v1'
),
reduce_func
],
[
fn
.
sum
(
msg
=
'm1'
,
out
=
'v1'
),
reduce_func
],
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
v2
=
g
.
get_n_repr
()[
'v2'
]
assert
th
.
allclose
(
v1
,
v2
)
# run builtin with single message and reduce
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
),
fn
.
sum
(
out
=
'v1'
),
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
fn
.
sum
(
msg
=
'm'
,
out
=
'v1'
),
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
assert
th
.
allclose
(
v1
,
v2
)
# 1 message, 2 reduces, using anonymous repr
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
),
[
fn
.
sum
(
out
=
'v2'
),
fn
.
sum
(
out
=
'v3'
)],
None
)
# 1 message, 2 reduces
g
.
send_and_recv
(
u
,
v
,
fn
.
copy_src
(
src
=
fld
,
out
=
'm'
),
[
fn
.
sum
(
msg
=
'm'
,
out
=
'v2'
),
fn
.
sum
(
msg
=
'm'
,
out
=
'v3'
)],
None
)
v2
=
g
.
get_n_repr
()[
'v2'
]
v3
=
g
.
get_n_repr
()[
'v3'
]
assert
th
.
allclose
(
v1
,
v2
)
...
...
@@ -203,7 +204,7 @@ def test_send_and_recv_multi_fn():
# send and recv with edge weights, 2 message, 3 reduces
g
.
send_and_recv
(
u
,
v
,
[
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e1'
,
out
=
'm1'
),
fn
.
src_mul_edge
(
src
=
fld
,
edge
=
'e2'
,
out
=
'm2'
)],
[
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v1'
),
fn
.
sum
(
msg
s
=
'm2'
,
out
=
'v2'
),
fn
.
sum
(
msg
s
=
'm1'
,
out
=
'v3'
)],
[
fn
.
sum
(
msg
=
'm1'
,
out
=
'v1'
),
fn
.
sum
(
msg
=
'm2'
,
out
=
'v2'
),
fn
.
sum
(
msg
=
'm1'
,
out
=
'v3'
)],
None
)
v1
=
g
.
get_n_repr
()[
'v1'
]
v2
=
g
.
get_n_repr
()[
'v2'
]
...
...
tests/scripts/task_lint.sh
0 → 100755
View file @
3683a774
#!/bin/sh
# Lint the DGL C++ sources with dmlc-core's cpplint wrapper.
# The script exits with lint.py's status, so CI fails on style violations.
# (Fix: the scraped source had `echo` and its argument on separate lines,
# which is not valid shell — rejoined here.)
echo 'Checking code style of C++ codes...'
python3 third_party/dmlc-core/scripts/lint.py dgl cpp include src
tests/scripts/test_examples.sh
0 → 100755
View file @
3683a774
#!/bin/bash
# Smoke-test the PyTorch GCN examples on the requested device.
# Usage: test_examples.sh [CPU|GPU]

GCN_EXAMPLE_DIR="../../examples/pytorch/gcn"

# Print a failure message and abort the whole test run.
function fail {
    echo "FAIL: $*"
    # POSIX requires an exit status in 0-255; `exit -1` is non-portable.
    exit 1
}

function usage {
    echo "Usage: $0 [CPU|GPU]"
}

# check arguments
if [ $# -ne 1 ]; then
    usage
    fail "Error: must specify device"
fi

if [ "$1" == "CPU" ]; then
    dev=-1
elif [ "$1" == "GPU" ]; then
    export CUDA_VISIBLE_DEVICES=0
    dev=0
else
    usage
    fail "Unknown device $1"
fi

# Quote the directory and check pushd: running the examples from the wrong
# cwd would otherwise produce confusing downstream failures.
pushd "$GCN_EXAMPLE_DIR" > /dev/null || fail "cannot enter $GCN_EXAMPLE_DIR"
# test CPU
python3 gcn.py --dataset cora --gpu "$dev" || fail "run gcn.py on $1"
python3 gcn_spmv.py --dataset cora --gpu "$dev" || fail "run gcn_spmv.py on $1"
popd > /dev/null
Prev
1
2
3
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment