Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
9fe5092c
Unverified
Commit
9fe5092c
authored
Feb 19, 2023
by
Hongzhi (Steve), Chen
Committed by
GitHub
Feb 19, 2023
Browse files
lintrunner (#5326)
Co-authored-by:
Ubuntu
<
ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal
>
parent
9836f78e
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
173 additions
and
176 deletions
+173
-176
examples/pytorch/capsule/DGLRoutingLayer.py
examples/pytorch/capsule/DGLRoutingLayer.py
+82
-84
examples/pytorch/capsule/simple_routing.py
examples/pytorch/capsule/simple_routing.py
+40
-41
examples/pytorch/rect/utils.py
examples/pytorch/rect/utils.py
+51
-51
No files found.
examples/pytorch/capsule/DGLRoutingLayer.py
View file @
9fe5092c
import
dgl
import
torch
as
th
import
torch.nn
as
nn
import
torch.nn.functional
as
F
class DGLRoutingLayer(nn.Module):
    """Capsule dynamic routing implemented as message passing on a DGL graph.

    Every input capsule node is connected to every output capsule node.
    Edge field ``b`` holds the routing logits, ``c`` the coupling
    coefficients, and ``u_hat`` the caller-supplied prediction vectors;
    output node field ``v`` holds the squashed capsule outputs.
    """

    def __init__(self, in_nodes, out_nodes, f_size, batch_size=0, device="cpu"):
        super(DGLRoutingLayer, self).__init__()
        self.batch_size = batch_size
        # Fully connected bipartite routing graph built by the module itself.
        self.g = init_graph(in_nodes, out_nodes, f_size, device=device)
        self.in_nodes = in_nodes
        self.out_nodes = out_nodes
        self.in_indx = list(range(in_nodes))
        self.out_indx = list(range(in_nodes, in_nodes + out_nodes))
        self.device = device

    def forward(self, u_hat, routing_num=1):
        """Run ``routing_num`` routing iterations over predictions ``u_hat``.

        ``u_hat`` has one row per edge; when ``batch_size`` is non-zero an
        extra batch dimension is assumed at dim 1 (TODO confirm against
        callers — inferred from the unsqueeze/mean branches below).
        """
        self.g.edata["u_hat"] = u_hat
        batch_size = self.batch_size

        # step 2 (line 5): weight each prediction by its coupling coefficient
        def cap_message(edges):
            if batch_size:
                return {"m": edges.data["c"].unsqueeze(1) * edges.data["u_hat"]}
            return {"m": edges.data["c"] * edges.data["u_hat"]}

        # Sum the incoming weighted predictions into node field "s".
        def cap_reduce(nodes):
            return {"s": th.sum(nodes.mailbox["m"], dim=1)}

        for _ in range(routing_num):
            # step 1 (line 4): softmax-normalize the logits over out edges
            logits = self.g.edata["b"].view(self.in_nodes, self.out_nodes)
            self.g.edata["c"] = F.softmax(logits, dim=1).view(-1, 1)

            # Execute steps 1 & 2.
            self.g.update_all(message_func=cap_message, reduce_func=cap_reduce)

            # step 3 (line 6): squash the totals into output capsule vectors
            squash_dim = 2 if self.batch_size else 1
            self.g.nodes[self.out_indx].data["v"] = squash(
                self.g.nodes[self.out_indx].data["s"], dim=squash_dim
            )

            # step 4 (line 7): grow each logit by the agreement between the
            # edge's prediction and the current output capsule vector.
            v = th.cat(
                [self.g.nodes[self.out_indx].data["v"]] * self.in_nodes, dim=0
            )
            if self.batch_size:
                agreement = (
                    (self.g.edata["u_hat"] * v)
                    .mean(dim=1)
                    .sum(dim=1, keepdim=True)
                )
            else:
                agreement = (self.g.edata["u_hat"] * v).sum(dim=1, keepdim=True)
            self.g.edata["b"] = self.g.edata["b"] + agreement
def squash(s, dim=1):
    """Capsule squashing non-linearity.

    Shrinks ``s`` to ``(|s|^2 / (1 + |s|^2)) * (s / |s|)`` along ``dim``,
    so short vectors go toward zero and long vectors toward unit length.

    Fix over the original: the norm is clamped away from zero, so an
    all-zero input yields zeros instead of NaN (0/0 in ``s / s_norm``).
    """
    sq = th.sum(s**2, dim=dim, keepdim=True)
    # Clamp prevents division by zero for all-zero capsule vectors.
    s_norm = th.sqrt(sq).clamp(min=1e-12)
    return (sq / (1.0 + sq)) * (s / s_norm)
def init_graph(in_nodes, out_nodes, f_size, device="cpu"):
    """Build the fully connected bipartite routing graph.

    Nodes ``0..in_nodes-1`` are input capsules; the remaining ``out_nodes``
    nodes are output capsules. Edge logit field ``b`` starts at zero.
    ``f_size`` is accepted for interface compatibility but unused here.
    """
    g = dgl.DGLGraph()
    # Missing node fields default to zeros instead of raising.
    g.set_n_initializer(dgl.frame.zero_initializer)
    g.add_nodes(in_nodes + out_nodes)
    sources = list(range(in_nodes))
    targets = list(range(in_nodes, in_nodes + out_nodes))
    # Connect each input capsule to every output capsule
    # (edge broadcasting: one source node, many destinations).
    for src in sources:
        g.add_edges(src, targets)
    g = g.to(device)
    g.edata["b"] = th.zeros(in_nodes * out_nodes, 1).to(device)
    return g
import
dgl
import
torch
as
th
import
torch.nn
as
nn
import
torch.nn.functional
as
F
class DGLRoutingLayer(nn.Module):
    """Capsule dynamic routing implemented as message passing on a DGL graph.

    Every input capsule node is connected to every output capsule node.
    Edge field ``b`` holds the routing logits, ``c`` the coupling
    coefficients, and ``u_hat`` the caller-supplied prediction vectors;
    output node field ``v`` holds the squashed capsule outputs.
    """

    def __init__(self, in_nodes, out_nodes, f_size, batch_size=0, device="cpu"):
        super(DGLRoutingLayer, self).__init__()
        self.batch_size = batch_size
        # Fully connected bipartite routing graph built by the module itself.
        self.g = init_graph(in_nodes, out_nodes, f_size, device=device)
        self.in_nodes = in_nodes
        self.out_nodes = out_nodes
        self.in_indx = list(range(in_nodes))
        self.out_indx = list(range(in_nodes, in_nodes + out_nodes))
        self.device = device

    def forward(self, u_hat, routing_num=1):
        """Run ``routing_num`` routing iterations over predictions ``u_hat``.

        ``u_hat`` has one row per edge; when ``batch_size`` is non-zero an
        extra batch dimension is assumed at dim 1 (TODO confirm against
        callers — inferred from the unsqueeze/mean branches below).
        """
        self.g.edata["u_hat"] = u_hat
        batch_size = self.batch_size

        # step 2 (line 5): weight each prediction by its coupling coefficient
        def cap_message(edges):
            if batch_size:
                return {"m": edges.data["c"].unsqueeze(1) * edges.data["u_hat"]}
            return {"m": edges.data["c"] * edges.data["u_hat"]}

        # Sum the incoming weighted predictions into node field "s".
        def cap_reduce(nodes):
            return {"s": th.sum(nodes.mailbox["m"], dim=1)}

        for _ in range(routing_num):
            # step 1 (line 4): softmax-normalize the logits over out edges
            logits = self.g.edata["b"].view(self.in_nodes, self.out_nodes)
            self.g.edata["c"] = F.softmax(logits, dim=1).view(-1, 1)

            # Execute steps 1 & 2.
            self.g.update_all(message_func=cap_message, reduce_func=cap_reduce)

            # step 3 (line 6): squash the totals into output capsule vectors
            squash_dim = 2 if self.batch_size else 1
            self.g.nodes[self.out_indx].data["v"] = squash(
                self.g.nodes[self.out_indx].data["s"], dim=squash_dim
            )

            # step 4 (line 7): grow each logit by the agreement between the
            # edge's prediction and the current output capsule vector.
            v = th.cat(
                [self.g.nodes[self.out_indx].data["v"]] * self.in_nodes, dim=0
            )
            if self.batch_size:
                agreement = (
                    (self.g.edata["u_hat"] * v)
                    .mean(dim=1)
                    .sum(dim=1, keepdim=True)
                )
            else:
                agreement = (self.g.edata["u_hat"] * v).sum(dim=1, keepdim=True)
            self.g.edata["b"] = self.g.edata["b"] + agreement
def squash(s, dim=1):
    """Capsule squashing non-linearity.

    Shrinks ``s`` to ``(|s|^2 / (1 + |s|^2)) * (s / |s|)`` along ``dim``,
    so short vectors go toward zero and long vectors toward unit length.

    Fix over the original: the norm is clamped away from zero, so an
    all-zero input yields zeros instead of NaN (0/0 in ``s / s_norm``).
    """
    sq = th.sum(s**2, dim=dim, keepdim=True)
    # Clamp prevents division by zero for all-zero capsule vectors.
    s_norm = th.sqrt(sq).clamp(min=1e-12)
    return (sq / (1.0 + sq)) * (s / s_norm)
def init_graph(in_nodes, out_nodes, f_size, device="cpu"):
    """Build the fully connected bipartite routing graph.

    Nodes ``0..in_nodes-1`` are input capsules; the remaining ``out_nodes``
    nodes are output capsules. Edge logit field ``b`` starts at zero.
    ``f_size`` is accepted for interface compatibility but unused here.
    """
    g = dgl.DGLGraph()
    # Missing node fields default to zeros instead of raising.
    g.set_n_initializer(dgl.frame.zero_initializer)
    g.add_nodes(in_nodes + out_nodes)
    sources = list(range(in_nodes))
    targets = list(range(in_nodes, in_nodes + out_nodes))
    # Connect each input capsule to every output capsule
    # (edge broadcasting: one source node, many destinations).
    for src in sources:
        g.add_edges(src, targets)
    g = g.to(device)
    g.edata["b"] = th.zeros(in_nodes * out_nodes, 1).to(device)
    return g
examples/pytorch/capsule/simple_routing.py
View file @
9fe5092c
import dgl
import torch as th
import torch.nn as nn
from DGLRoutingLayer import DGLRoutingLayer
from torch.nn import functional as F

# Standalone demo of capsule dynamic routing.
#
# Fix over the original script: ``DGLRoutingLayer`` takes
# ``(in_nodes, out_nodes, f_size, ...)`` and builds its own bipartite graph
# via ``init_graph`` — the old code passed a hand-built ``DGLGraph`` as the
# sole argument and then called ``routing_layer()`` without the required
# ``u_hat``, so it could not run against the layer in this repository.
in_nodes = 20
out_nodes = 10
f_size = 4

routing_layer = DGLRoutingLayer(in_nodes, out_nodes, f_size)

# Random prediction vectors, one row per (input, output) capsule edge.
u_hat = th.randn(in_nodes * out_nodes, f_size)

entropy_list = []
for i in range(15):
    # One routing iteration; refreshes coupling coefficients g.edata["c"].
    routing_layer(u_hat)
    dist_matrix = routing_layer.g.edata["c"].view(in_nodes, out_nodes)
    # Entropy of the coupling distribution per output capsule; it should
    # decrease as routing concentrates each input on fewer outputs.
    entropy = (-dist_matrix * th.log(dist_matrix)).sum(dim=0)
    entropy_list.append(entropy.data.numpy())

std = dist_matrix.std(dim=0)
import dgl
import torch as th
import torch.nn as nn
from DGLRoutingLayer import DGLRoutingLayer
from torch.nn import functional as F

# Standalone demo of capsule dynamic routing.
#
# Fix over the original script: ``DGLRoutingLayer`` takes
# ``(in_nodes, out_nodes, f_size, ...)`` and builds its own bipartite graph
# via ``init_graph`` — the old code passed a hand-built ``DGLGraph`` as the
# sole argument and then called ``routing_layer()`` without the required
# ``u_hat``, so it could not run against the layer in this repository.
in_nodes = 20
out_nodes = 10
f_size = 4

routing_layer = DGLRoutingLayer(in_nodes, out_nodes, f_size)

# Random prediction vectors, one row per (input, output) capsule edge.
u_hat = th.randn(in_nodes * out_nodes, f_size)

entropy_list = []
for i in range(15):
    # One routing iteration; refreshes coupling coefficients g.edata["c"].
    routing_layer(u_hat)
    dist_matrix = routing_layer.g.edata["c"].view(in_nodes, out_nodes)
    # Entropy of the coupling distribution per output capsule; it should
    # decrease as routing concentrates each input on fewer outputs.
    entropy = (-dist_matrix * th.log(dist_matrix)).sum(dim=0)
    entropy_list.append(entropy.data.numpy())

std = dist_matrix.std(dim=0)
examples/pytorch/rect/utils.py
View file @
9fe5092c
import
dgl
import
torch
from
dgl.data
import
CiteseerGraphDataset
,
CoraGraphDataset
def load_data(args):
    """Load a citation-graph dataset together with its standard splits.

    Parameters
    ----------
    args : object with attributes ``dataset`` ("cora" or "citeseer") and
        ``gpu`` (negative for CPU, otherwise a CUDA device index).

    Returns
    -------
    tuple
        ``(g, features, labels, train_mask, test_mask, num_classes, cuda)``.

    Raises
    ------
    ValueError
        If ``args.dataset`` names an unsupported dataset.
    """
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
    cuda = args.gpu >= 0
    if cuda:
        # Cast ids to int32 and move the whole graph onto the target GPU.
        g = g.int().to(args.gpu)
    features = g.ndata["feat"]
    labels = g.ndata["label"]
    train_mask = g.ndata["train_mask"]
    test_mask = g.ndata["test_mask"]
    # Self-loops ensure every node also receives its own message.
    g = dgl.add_self_loop(g)
    return (
        g,
        features,
        labels,
        train_mask,
        test_mask,
        data.num_classes,
        cuda,
    )
def svd_feature(features, d=200):
    """Get 200-dimensional node features, to avoid curse of dimensionality.

    Features with at most ``d`` columns are returned unchanged; otherwise
    the rank-``d`` SVD projection ``U[:, :d] @ diag(S[:d])`` is returned.
    """
    if features.shape[1] <= d:
        # Already low-dimensional — nothing to reduce.
        return features
    U, S, VT = torch.svd(features)
    # Project onto the top-d left singular directions, scaled by the
    # corresponding singular values.
    return torch.mm(U[:, 0:d], torch.diag(S[0:d]))
def process_classids(labels_temp):
    """Reorder the remaining classes with unseen classes removed.

    Input: a 1-D integer label tensor whose values may be any subset of the
    original class ids.
    Output: the same tensor, rewritten in place so the distinct ids are
    remapped to the contiguous range ``0..k-1`` in ascending numeric order.
    """
    # Rank each distinct original id among the sorted distinct ids
    # (replaces the original hand-rolled dict building + manual counter).
    labeldict = {
        label: idx
        for idx, label in enumerate(sorted({int(i) for i in labels_temp}))
    }
    # Rewrite every entry in place; each element is read exactly once before
    # being overwritten, so old/new id collisions are harmless.
    for i in range(labels_temp.numel()):
        labels_temp[i] = labeldict[int(labels_temp[i])]
    return labels_temp
import
dgl
import
torch
from
dgl.data
import
CiteseerGraphDataset
,
CoraGraphDataset
def load_data(args):
    """Load a citation-graph dataset together with its standard splits.

    Parameters
    ----------
    args : object with attributes ``dataset`` ("cora" or "citeseer") and
        ``gpu`` (negative for CPU, otherwise a CUDA device index).

    Returns
    -------
    tuple
        ``(g, features, labels, train_mask, test_mask, num_classes, cuda)``.

    Raises
    ------
    ValueError
        If ``args.dataset`` names an unsupported dataset.
    """
    if args.dataset == "cora":
        data = CoraGraphDataset()
    elif args.dataset == "citeseer":
        data = CiteseerGraphDataset()
    else:
        raise ValueError("Unknown dataset: {}".format(args.dataset))

    g = data[0]
    cuda = args.gpu >= 0
    if cuda:
        # Cast ids to int32 and move the whole graph onto the target GPU.
        g = g.int().to(args.gpu)
    features = g.ndata["feat"]
    labels = g.ndata["label"]
    train_mask = g.ndata["train_mask"]
    test_mask = g.ndata["test_mask"]
    # Self-loops ensure every node also receives its own message.
    g = dgl.add_self_loop(g)
    return (
        g,
        features,
        labels,
        train_mask,
        test_mask,
        data.num_classes,
        cuda,
    )
def svd_feature(features, d=200):
    """Get 200-dimensional node features, to avoid curse of dimensionality.

    Features with at most ``d`` columns are returned unchanged; otherwise
    the rank-``d`` SVD projection ``U[:, :d] @ diag(S[:d])`` is returned.
    """
    if features.shape[1] <= d:
        # Already low-dimensional — nothing to reduce.
        return features
    U, S, VT = torch.svd(features)
    # Project onto the top-d left singular directions, scaled by the
    # corresponding singular values.
    return torch.mm(U[:, 0:d], torch.diag(S[0:d]))
def process_classids(labels_temp):
    """Reorder the remaining classes with unseen classes removed.

    Input: a 1-D integer label tensor whose values may be any subset of the
    original class ids.
    Output: the same tensor, rewritten in place so the distinct ids are
    remapped to the contiguous range ``0..k-1`` in ascending numeric order.
    """
    # Rank each distinct original id among the sorted distinct ids
    # (replaces the original hand-rolled dict building + manual counter).
    labeldict = {
        label: idx
        for idx, label in enumerate(sorted({int(i) for i in labels_temp}))
    }
    # Rewrite every entry in place; each element is read exactly once before
    # being overwritten, so old/new id collisions are harmless.
    for i in range(labels_temp.numel()):
        labels_temp[i] = labeldict[int(labels_temp[i])]
    return labels_temp
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment