Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
f52fc3fc
"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "af6c0fb7661faea7ef2dc598cc4b2bf63a943d04"
Commit
f52fc3fc
authored
Oct 01, 2018
by
GaiYu0
Browse files
experiment 6.1
parent
2b092811
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
74 additions
and
6 deletions
+74
-6
examples/pytorch/line_graph/dense_sbm.py
examples/pytorch/line_graph/dense_sbm.py
+21
-0
examples/pytorch/line_graph/gnn.py
examples/pytorch/line_graph/gnn.py
+2
-5
examples/pytorch/line_graph/sbm.py
examples/pytorch/line_graph/sbm.py
+1
-1
examples/pytorch/line_graph/train.py
examples/pytorch/line_graph/train.py
+50
-0
No files found.
examples/pytorch/line_graph/dense_sbm.py
0 → 100644
View file @
f52fc3fc
import
torch
as
th
def sbm(y, p, q):
    """Sample a symmetric adjacency matrix from a dense stochastic block model.

    Parameters
    ----------
    y: torch.Tensor (N, 1)
        Per-node community labels (column vector).
    p: float
        Edge probability for node pairs in the same community.
    q: float
        Edge probability for node pairs in different communities.

    Returns
    -------
    torch.Tensor (N, N)
        Symmetric 0/1 adjacency matrix; diagonal entries (self-loops)
        are sampled with probability ``p``.
    """
    # (N, N) indicator: 1 where two nodes carry the same label (broadcasted compare).
    same = (y == y.t()).float()
    # Per-pair Bernoulli probability: p within a block, q across blocks.
    probs = same * p + (1 - same) * q
    draw = th.distributions.Bernoulli(probs).sample()
    # Symmetrize: keep the upper triangle (incl. diagonal) and mirror the
    # strictly-upper part into the lower half.
    upper = th.triu(draw)
    return upper + th.triu(draw, 1).t()
if __name__ == '__main__':
    # Smoke test: one community of N nodes with intra-block edge
    # probability 1/N (expected degree ~1) and no inter-block edges.
    N = 10000
    y = th.ones(N, 1)
    p = 1 / N
    q = 0
    a = sbm(y, p, q)
    # Total edge count (each undirected edge counted twice, self-loops once).
    print(th.sum(a))
examples/pytorch/line_graph/gnn.py
View file @
f52fc3fc
...
@@ -3,14 +3,11 @@ Supervised Community Detection with Hierarchical Graph Neural Networks
...
@@ -3,14 +3,11 @@ Supervised Community Detection with Hierarchical Graph Neural Networks
https://arxiv.org/abs/1705.08415
https://arxiv.org/abs/1705.08415
Deviations from paper:
Deviations from paper:
- Addition of global aggregation operator.
- Message passing is equivalent to `A^j \cdot X`, instead of `\min(1, A^j) \cdot X`.
- Message passing is equivalent to `A^j \cdot X`, instead of `\min(1, A^j) \cdot X`.
- Pm Pd
"""
"""
# TODO self-loop?
import
copy
import
copy
import
itertools
import
itertools
import
dgl
import
dgl
...
@@ -86,7 +83,7 @@ class GNN(nn.Module):
...
@@ -86,7 +83,7 @@ class GNN(nn.Module):
self
.
x
=
self
.
normalize
(
th
.
tensor
(
x
,
dtype
=
th
.
float
).
unsqueeze
(
1
))
self
.
x
=
self
.
normalize
(
th
.
tensor
(
x
,
dtype
=
th
.
float
).
unsqueeze
(
1
))
y
=
list
(
zip
(
*
lg
.
degree
))[
1
]
y
=
list
(
zip
(
*
lg
.
degree
))[
1
]
self
.
y
=
self
.
normalize
(
th
.
tensor
(
y
,
dtype
=
th
.
float
).
unsqueeze
(
1
))
self
.
y
=
self
.
normalize
(
th
.
tensor
(
y
,
dtype
=
th
.
float
).
unsqueeze
(
1
))
self
.
eid2nid
=
th
.
tensor
([
n
for
[[
_
,
n
],
_
]
in
lg
.
edges
])
self
.
eid2nid
=
th
.
tensor
([
int
(
n
)
for
[[
_
,
n
],
[
_
,
_
]
]
in
lg
.
edges
])
self
.
g
=
dgl
.
DGLGraph
(
g
)
self
.
g
=
dgl
.
DGLGraph
(
g
)
self
.
lg
=
dgl
.
DGLGraph
(
nx
.
convert_node_labels_to_integers
(
lg
))
self
.
lg
=
dgl
.
DGLGraph
(
nx
.
convert_node_labels_to_integers
(
lg
))
...
...
examples/pytorch/line_graph/sbm.py
View file @
f52fc3fc
...
@@ -32,7 +32,7 @@ class SSBM:
...
@@ -32,7 +32,7 @@ class SSBM:
self
.
a
=
a
*
math
.
log
(
n
)
/
n
self
.
a
=
a
*
math
.
log
(
n
)
/
n
self
.
b
=
b
*
math
.
log
(
n
)
/
n
self
.
b
=
b
*
math
.
log
(
n
)
/
n
elif
regime
==
'constant'
:
elif
regime
==
'constant'
:
snr
=
(
a
-
b
)
**
2
/
(
k
*
(
a
+
(
k
-
1
)
*
b
))
snr
=
(
a
-
b
)
**
2
/
(
k
*
(
a
+
(
k
+
1
)
*
b
))
if
snr
>
1
:
if
snr
>
1
:
print
(
'SSBM model with possible detection.'
)
print
(
'SSBM model with possible detection.'
)
else
:
else
:
...
...
examples/pytorch/line_graph/t
est
.py
→
examples/pytorch/line_graph/t
rain
.py
View file @
f52fc3fc
"""
"""
ipython3 t
est
.py --
--features 1 16 16
--gpu -1 --n-classes
5
--n-iterations 10
--n-nodes 10
--radius 3
ipython3 t
rain
.py -- --gpu -1 --n-classes
2
--n-iterations 10
00 --n-layers 30 --n-nodes 1000 --n-features 2
--radius 3
"""
"""
import
argparse
import
argparse
from
itertools
import
permutations
import
networkx
as
nx
import
networkx
as
nx
import
torch
as
th
import
torch
as
th
import
torch.nn.functional
as
F
import
torch.nn.functional
as
F
import
torch.optim
as
optim
import
torch.optim
as
optim
import
gnn
import
gnn
import
sbm
parser
=
argparse
.
ArgumentParser
()
parser
=
argparse
.
ArgumentParser
()
parser
.
add_argument
(
'--
features'
,
nargs
=
'+
'
,
type
=
int
)
parser
.
add_argument
(
'--
batch-size
'
,
type
=
int
)
parser
.
add_argument
(
'--gpu'
,
type
=
int
)
parser
.
add_argument
(
'--gpu'
,
type
=
int
)
parser
.
add_argument
(
'--n-classes'
,
type
=
int
)
parser
.
add_argument
(
'--n-classes'
,
type
=
int
)
parser
.
add_argument
(
'--n-features'
,
type
=
int
)
parser
.
add_argument
(
'--n-graphs'
,
type
=
int
)
parser
.
add_argument
(
'--n-iterations'
,
type
=
int
)
parser
.
add_argument
(
'--n-iterations'
,
type
=
int
)
parser
.
add_argument
(
'--n-layers'
,
type
=
int
)
parser
.
add_argument
(
'--n-nodes'
,
type
=
int
)
parser
.
add_argument
(
'--n-nodes'
,
type
=
int
)
parser
.
add_argument
(
'--radius'
,
type
=
int
)
parser
.
add_argument
(
'--radius'
,
type
=
int
)
args
=
parser
.
parse_args
()
args
=
parser
.
parse_args
()
if
args
.
gpu
<
0
:
dev
=
th
.
device
(
'cpu'
)
if
args
.
gpu
<
0
else
th
.
device
(
'cuda:%d'
%
args
.
gpu
)
cuda
=
False
else
:
cuda
=
True
th
.
cuda
.
set_device
(
args
.
gpu
)
g
=
nx
.
barabasi_albert_graph
(
args
.
n_nodes
,
1
).
to_directed
()
# TODO SBM
ssbm
=
sbm
.
SSBM
(
args
.
n_nodes
,
args
.
n_classes
,
1
,
1
)
y
=
th
.
multinomial
(
th
.
ones
(
args
.
n_classes
),
args
.
n_nodes
,
replacement
=
True
)
gg
=
[]
model
=
gnn
.
GNN
(
g
,
args
.
features
,
args
.
radius
,
args
.
n_classes
)
for
i
in
range
(
args
.
n_graphs
):
if
cuda
:
ssbm
.
generate
()
model
.
cuda
()
gg
.
append
(
ssbm
.
graph
)
opt
=
optim
.
Adam
(
model
.
parameters
())
assert
args
.
n_nodes
%
args
.
n_classes
==
0
ones
=
th
.
ones
(
int
(
args
.
n_nodes
/
args
.
n_classes
))
yy
=
[
th
.
cat
([
x
*
ones
for
x
in
p
]).
long
().
to
(
dev
)
for
p
in
permutations
(
range
(
args
.
n_classes
))]
feats
=
[
1
]
+
[
args
.
n_features
]
*
args
.
n_layers
+
[
args
.
n_classes
]
model
=
gnn
.
GNN
(
g
,
feats
,
args
.
radius
,
args
.
n_classes
).
to
(
dev
)
opt
=
optim
.
Adamax
(
model
.
parameters
(),
lr
=
0.04
)
for
i
in
range
(
args
.
n_iterations
):
for
i
in
range
(
args
.
n_iterations
):
y_bar
=
model
()
y_bar
=
model
()
loss
=
F
.
cross_entropy
(
y_bar
,
y
)
loss
=
min
(
F
.
cross_entropy
(
y_bar
,
y
)
for
y
in
yy
)
opt
.
zero_grad
()
opt
.
zero_grad
()
loss
.
backward
()
loss
.
backward
()
opt
.
step
()
opt
.
step
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment