OpenDAS / dgl
Commit d0313326, authored Oct 22, 2018 by VoVAllen

delete legacy codes

parent 7bad9178
Showing 5 changed files with 0 additions and 228 deletions:

examples/pytorch/capsule/capsule.py        +0 −22
examples/pytorch/capsule/capsule_dgl.py    +0 −34
examples/pytorch/capsule/capsule_model.py  +0 −46
examples/pytorch/capsule/original.py       +0 −116
examples/pytorch/capsule/test_capsule.py   +0 −10
examples/pytorch/capsule/capsule.py  deleted  100644 → 0
import torch
from torchvision import datasets, transforms


def main():
    batch_size = 32
    test_batch_size = 32

    dataset_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_dataset = datasets.MNIST('../data', train=True, download=True, transform=dataset_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    test_dataset = datasets.MNIST('../data', train=False, download=True, transform=dataset_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch_size, shuffle=True)


if __name__ == '__main__':
    main()
examples/pytorch/capsule/capsule_dgl.py  deleted  100644 → 0
import dgl
import networkx as nx
import numpy as np
from torch import nn
import torch
import torch.nn.functional as F


def capsule_message(src, edge):
    return {'ft': src['ft'], 'bij': edge['b']}


class GATReduce(nn.Module):
    def __init__(self, attn_drop):
        super(GATReduce, self).__init__()
        self.attn_drop = attn_drop

    def forward(self, node, msgs):
        a = torch.unsqueeze(node['a'], 0)  # shape (1, 1)
        ft = torch.cat([torch.unsqueeze(m['ft'], 0) for m in msgs], dim=0)  # shape (deg, D)
        # attention
        e = F.softmax(a, dim=0)
        if self.attn_drop != 0.0:
            e = F.dropout(e, self.attn_drop)
        return torch.sum(e * ft, dim=0)  # shape (D,)


class Capsule(nn.Module):
    def __init__(self):
        super(Capsule, self).__init__()
        self.g = dgl.DGLGraph(nx.from_numpy_matrix(np.ones((10, 10))))

    def forward(self, node, msgs):
        a1 = torch.unsqueeze(node['a1'], 0)  # shape (1, 1)
        a2 = torch.cat([torch.unsqueeze(m['a2'], 0) for m in msgs], dim=0)  # shape (deg, 1)
        ft = torch.cat([torch.unsqueeze(m['ft'], 0) for m in msgs], dim=0)  # shape (deg, D)
examples/pytorch/capsule/capsule_model.py  deleted  100644 → 0
import dgl
import networkx as nx
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from original import CapsuleLayer


class DGLCapsuleLayer(CapsuleLayer):
    def __init__(self, in_units, in_channels, num_units, unit_size, use_routing=True):
        super(DGLCapsuleLayer, self).__init__(in_units, in_channels, num_units, unit_size, use_routing=True)
        self.g = dgl.DGLGraph(nx.from_numpy_matrix(np.ones((10, 10))))
        self.W = nn.Parameter(torch.randn(1, in_channels, num_units, unit_size, in_units))
        # self.node_features = nn.Parameter(torch.randn(()))

    def routing(self, x):
        batch_size = x.size(0)
        x = x.transpose(1, 2)
        x = torch.stack([x] * self.num_units, dim=2).unsqueeze(4)
        W = torch.cat([self.W] * batch_size, dim=0)
        u_hat = torch.matmul(W, x)
        self.u_hat = u_hat
        self.node_feature = u_hat.clone().detach().transpose(0, 2).transpose(1, 2)
        self.g.set_n_repr({'ft': self.node_feature})
        self.edge_features = torch.zeros(100, 1)
        self.g.set_e_repr({'b_ij': self.edge_features})
        self.g.update_all(self.capsule_msg, self.capsule_reduce, lambda x: x)
        self.g.update_all(self.capsule_msg, self.capsule_reduce, lambda x: x)
        self.g.update_all(self.capsule_msg, self.capsule_reduce, lambda x: x)
        self.edge_features = self.edge_features + torch.dot(self.u_hat, self.node_feature)

    @staticmethod
    def capsule_msg(src, edge):
        return {'b_ij': edge['weight'], 'h': src['ft']}

    def capsule_reduce(self, node, msg):
        b_ij, h = msg
        b_ij_c, h_c = torch.cat(b_ij, dim=1), torch.cat(h, dim=1)
        c_i = F.softmax(b_ij_c, dim=1)
        s_j = torch.dot(c_i, self.u_hat)
        v_j = self.squash(s_j)
examples/pytorch/capsule/original.py  deleted  100644 → 0
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F


class ConvUnit(nn.Module):
    def __init__(self, in_channels):
        super(ConvUnit, self).__init__()
        self.conv0 = nn.Conv2d(in_channels=in_channels,
                               out_channels=32,  # fixme constant
                               kernel_size=9,  # fixme constant
                               stride=2,  # fixme constant
                               bias=True)

    def forward(self, x):
        return self.conv0(x)


class CapsuleLayer(nn.Module):
    def __init__(self, in_units, in_channels, num_units, unit_size, use_routing):
        super(CapsuleLayer, self).__init__()

        self.in_units = in_units
        self.in_channels = in_channels
        self.num_units = num_units
        self.use_routing = use_routing

        if self.use_routing:
            # In the paper, the deeper capsule layer(s) with capsule inputs (DigitCaps) use a special routing algorithm
            # that uses this weight matrix.
            self.W = nn.Parameter(torch.randn(1, in_channels, num_units, unit_size, in_units))
        else:
            # The first convolutional capsule layer (PrimaryCapsules in the paper) does not perform routing.
            # Instead, it is composed of several convolutional units, each of which sees the full input.
            # It is implemented as a normal convolutional layer with a special nonlinearity (squash()).
            def create_conv_unit(unit_idx):
                unit = ConvUnit(in_channels=in_channels)
                self.add_module("unit_" + str(unit_idx), unit)
                return unit

            self.units = [create_conv_unit(i) for i in range(self.num_units)]

    @staticmethod
    def squash(s):
        # This is equation 1 from the paper.
        mag_sq = torch.sum(s ** 2, dim=2, keepdim=True)
        mag = torch.sqrt(mag_sq)
        s = (mag_sq / (1.0 + mag_sq)) * (s / mag)
        return s

    def forward(self, x):
        if self.use_routing:
            return self.routing(x)
        else:
            return self.no_routing(x)

    def no_routing(self, x):
        # Get output for each unit.
        # Each will be (batch, channels, height, width).
        u = [self.units[i](x) for i in range(self.num_units)]

        # Stack all unit outputs (batch, unit, channels, height, width).
        u = torch.stack(u, dim=1)

        # Flatten to (batch, unit, output).
        u = u.view(x.size(0), self.num_units, -1)

        # Return squashed outputs.
        return CapsuleLayer.squash(u)

    def routing(self, x):
        batch_size = x.size(0)

        # (batch, in_units, features) -> (batch, features, in_units)
        x = x.transpose(1, 2)

        # (batch, features, in_units) -> (batch, features, num_units, in_units, 1)
        x = torch.stack([x] * self.num_units, dim=2).unsqueeze(4)

        # (batch, features, in_units, unit_size, num_units)
        W = torch.cat([self.W] * batch_size, dim=0)

        # Transform inputs by weight matrix.
        # (batch_size, features, num_units, unit_size, 1)
        u_hat = torch.matmul(W, x)

        # Initialize routing logits to zero.
        b_ij = Variable(torch.zeros(1, self.in_channels, self.num_units, 1)).cuda()

        # Iterative routing.
        num_iterations = 3
        for iteration in range(num_iterations):
            # Convert routing logits to softmax.
            # (batch, features, num_units, 1, 1)
            c_ij = F.softmax(b_ij)
            c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)

            # Apply routing (c_ij) to weighted inputs (u_hat).
            # (batch_size, 1, num_units, unit_size, 1)
            s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)

            # (batch_size, 1, num_units, unit_size, 1)
            v_j = CapsuleLayer.squash(s_j)

            # (batch_size, features, num_units, unit_size, 1)
            v_j1 = torch.cat([v_j] * self.in_channels, dim=1)

            # (1, features, num_units, 1)
            u_vj1 = torch.matmul(u_hat.transpose(3, 4), v_j1).squeeze(4).mean(dim=0, keepdim=True)

            # Update b_ij (routing)
            b_ij = b_ij + u_vj1

        return v_j.squeeze(1)
\ No newline at end of file
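Aside (not part of the diff): in the notation of the comments in original.py above, squash() implements equation 1 of the capsule paper,

\[
\mathrm{squash}(s) \;=\; \frac{\lVert s \rVert^{2}}{1 + \lVert s \rVert^{2}} \, \frac{s}{\lVert s \rVert},
\]

and each routing iteration computes, up to broadcasting and the batch averaging the code applies to the logit update,

\[
c_{ij} = \mathrm{softmax}(b_{ij}), \qquad
s_{j} = \sum_{i} c_{ij}\,\hat{u}_{j|i}, \qquad
v_{j} = \mathrm{squash}(s_{j}), \qquad
b_{ij} \leftarrow b_{ij} + \hat{u}_{j|i} \cdot v_{j}.
\]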
examples/pytorch/capsule/test_capsule.py  deleted  100644 → 0
from capsule_model import DGLCapsuleLayer
import torch as th

device = 'cuda'
model = DGLCapsuleLayer(in_units=8, in_channels=1152, num_units=10, use_routing=True, unit_size=16)
x = th.randn((128, 8, 1152))
model(x)
\ No newline at end of file
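Aside (not part of the diff): test_capsule.py drives the layer with a (128, 8, 1152) input. The short PyTorch sketch below is a hypothetical check rather than code from this repository; it traces the shapes produced by the opening steps of routing() (transpose, stack, weight tiling, matmul), with a batch of 2 standing in for 128 to keep it cheap.

import torch

# Constructor arguments used by test_capsule.py above, with a small batch.
batch, in_units, in_channels, num_units, unit_size = 2, 8, 1152, 10, 16

x = torch.randn(batch, in_units, in_channels)            # (2, 8, 1152)
x = x.transpose(1, 2)                                     # (2, 1152, 8)
x = torch.stack([x] * num_units, dim=2).unsqueeze(4)      # (2, 1152, 10, 8, 1)

W = torch.randn(1, in_channels, num_units, unit_size, in_units)
W = torch.cat([W] * batch, dim=0)                         # (2, 1152, 10, 16, 8)

u_hat = torch.matmul(W, x)                                # one prediction vector per (input capsule, output capsule)
print(u_hat.shape)                                        # torch.Size([2, 1152, 10, 16, 1])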