Commit 7cb50072 authored by VoVAllen, committed by Minjie Wang

[Doc][Model] New Capsule Tutorial & Example (#143)

* new capsule tutorial

* capsule for new API

* fix deprecated API

* New tutorial and example

* investigate gc problem

* add viz code

* new capsule tutorial

* remove ipynb

* move u_hat

* add link

* add requirements.txt

* remove ani.save

* update ci to install requirements

* add graphviz
parent a95459e3
 # install libraries for building c++ core on ubuntu
 apt update && apt install -y --no-install-recommends --force-yes \
     apt-utils git build-essential make cmake wget unzip sudo \
-    libz-dev libxml2-dev libopenblas-dev libopencv-dev ca-certificates
+    libz-dev libxml2-dev libopenblas-dev libopencv-dev \
+    libgraphviz-dev ca-certificates
@@ -2,82 +2,39 @@ import dgl
 import torch
 from torch import nn
 from torch.nn import functional as F
-import dgl.function as fn
+from DGLRoutingLayer import DGLRoutingLayer


 class DGLDigitCapsuleLayer(nn.Module):
-    def __init__(self, input_capsule_dim=8, input_capsule_num=1152, output_capsule_num=10, output_capsule_dim=16,
-                 num_routing=3, device='cpu'
-                 ):
+    def __init__(self, in_nodes_dim=8, in_nodes=1152, out_nodes=10, out_nodes_dim=16, device='cpu'):
         super(DGLDigitCapsuleLayer, self).__init__()
         self.device = device
-        self.input_capsule_dim = input_capsule_dim
-        self.input_capsule_num = input_capsule_num
-        self.output_capsule_dim = output_capsule_dim
-        self.output_capsule_num = output_capsule_num
-        self.num_routing = num_routing
-        self.weight = nn.Parameter(
-            torch.randn(input_capsule_num, output_capsule_num, output_capsule_dim, input_capsule_dim))
-        self.g, self.input_nodes, self.output_nodes = self.construct_graph()
-
-    def construct_graph(self):
-        g = dgl.DGLGraph()
-        g.add_nodes(self.input_capsule_num + self.output_capsule_num)
-        input_nodes = list(range(self.input_capsule_num))
-        output_nodes = list(range(self.input_capsule_num, self.input_capsule_num + self.output_capsule_num))
-        u, v = [], []
-        for i in input_nodes:
-            for j in output_nodes:
-                u.append(i)
-                v.append(j)
-        g.add_edges(u, v)
-        return g, input_nodes, output_nodes
+        self.in_nodes_dim, self.out_nodes_dim = in_nodes_dim, out_nodes_dim
+        self.in_nodes, self.out_nodes = in_nodes, out_nodes
+        self.weight = nn.Parameter(torch.randn(in_nodes, out_nodes, out_nodes_dim, in_nodes_dim))

     def forward(self, x):
         self.batch_size = x.size(0)
+        u_hat = self.compute_uhat(x)
+        routing = DGLRoutingLayer(self.in_nodes, self.out_nodes, self.out_nodes_dim, batch_size=self.batch_size,
+                                  device=self.device)
+        routing(u_hat, routing_num=3)
+        out_nodes_feature = routing.g.nodes[routing.out_indx].data['v']
+        routing.end()
+        # shape transformation is for further classification
+        return out_nodes_feature.transpose(0, 1).unsqueeze(1).unsqueeze(4).squeeze(1)
+
+    def compute_uhat(self, x):
+        # x is the input vector with shape [batch_size, in_nodes_dim, in_nodes]
+        # Transpose x to [batch_size, in_nodes, in_nodes_dim]
         x = x.transpose(1, 2)
-        x = torch.stack([x] * self.output_capsule_num, dim=2).unsqueeze(4)
+        # Expand x to [batch_size, in_nodes, out_nodes, in_nodes_dim, 1]
+        x = torch.stack([x] * self.out_nodes, dim=2).unsqueeze(4)
+        # Expand W from [in_nodes, out_nodes, in_nodes_dim, out_nodes_dim]
+        # to [batch_size, in_nodes, out_nodes, out_nodes_dim, in_nodes_dim]
         W = self.weight.expand(self.batch_size, *self.weight.size())
+        # u_hat's shape is [in_nodes, out_nodes, batch_size, out_nodes_dim]
         u_hat = torch.matmul(W, x).permute(1, 2, 0, 3, 4).squeeze().contiguous()
-
-        b_ij = torch.zeros(self.input_capsule_num, self.output_capsule_num).to(self.device)
-        self.g.set_e_repr({'b_ij': b_ij.view(-1)})
-        self.g.set_e_repr({'u_hat': u_hat.view(-1, self.batch_size, self.output_capsule_dim)})
-        node_features = torch.zeros(self.input_capsule_num + self.output_capsule_num, self.batch_size,
-                                    self.output_capsule_dim).to(self.device)
-        self.g.set_n_repr({'h': node_features})
-        for i in range(self.num_routing):
-            self.g.update_all(self.capsule_msg, self.capsule_reduce, self.capsule_update)
-            self.g.update_edge(edge_func=self.update_edge)
-        this_layer_nodes_feature = self.g.get_n_repr()['h'][
-            self.input_capsule_num:self.input_capsule_num + self.output_capsule_num]
-        return this_layer_nodes_feature.transpose(0, 1).unsqueeze(1).unsqueeze(4).squeeze(1)
-
-    def update_edge(self, u, v, edge):
-        return {'b_ij': edge['b_ij'] + (v['h'] * edge['u_hat']).mean(dim=1).sum(dim=1)}
-
-    @staticmethod
-    def capsule_msg(src, edge):
-        return {'b_ij': edge['b_ij'], 'h': src['h'], 'u_hat': edge['u_hat']}
-
-    @staticmethod
-    def capsule_reduce(node, msg):
-        b_ij_c, u_hat = msg['b_ij'], msg['u_hat']
-        c_i = F.softmax(b_ij_c, dim=0)
-        s_j = (c_i.unsqueeze(2).unsqueeze(3) * u_hat).sum(dim=1)
-        return {'h': s_j}
-
-    @staticmethod
-    def capsule_update(msg):
-        v_j = squash(msg['h'])
-        return {'h': v_j}
-
-
-def squash(s, dim=2):
-    sq = torch.sum(s ** 2, dim=dim, keepdim=True)
-    s_std = torch.sqrt(sq)
-    s = (sq / (1.0 + sq)) * (s / s_std)
-    return s
+        return u_hat.view(-1, self.batch_size, self.out_nodes_dim)
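A quick way to sanity-check the shapes flowing through this layer is a standalone forward pass. The snippet below is a hypothetical smoke test, not part of the commit; it assumes the two modules above are importable and a DGL release old enough to support register_message_func and the no-argument update_all() that DGLRoutingLayer relies on.

# Hypothetical smoke test (not part of the commit).
import torch
from DGLDigitCapsule import DGLDigitCapsuleLayer

layer = DGLDigitCapsuleLayer()      # defaults: 1152 primary capsules (dim 8), 10 digit capsules (dim 16)
x = torch.randn(2, 8, 1152)         # [batch_size, in_nodes_dim, in_nodes]
out = layer(x)
print(out.shape)                    # expected: torch.Size([2, 10, 16, 1])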
# DGLRoutingLayer.py
import torch.nn as nn
import torch as th
import torch.nn.functional as F
import dgl


class DGLRoutingLayer(nn.Module):
    def __init__(self, in_nodes, out_nodes, f_size, batch_size=0, device='cpu'):
        super(DGLRoutingLayer, self).__init__()
        self.batch_size = batch_size
        self.g = init_graph(in_nodes, out_nodes, f_size, device=device, batch_size=batch_size)
        self.in_nodes = in_nodes
        self.out_nodes = out_nodes
        self.in_indx = list(range(in_nodes))
        self.out_indx = list(range(in_nodes, in_nodes + out_nodes))
        self.device = device

    def forward(self, u_hat, routing_num=1):
        self.g.edata['u_hat'] = u_hat

        for r in range(routing_num):
            # step 1 (line 4): normalize over out edges
            in_edges = self.g.edata['b'].view(self.in_nodes, self.out_nodes)
            self.g.edata['c'] = F.softmax(in_edges, dim=1).view(-1, 1)

            def cap_message(edges):
                if self.batch_size:
                    return {'m': edges.data['c'].unsqueeze(1) * edges.data['u_hat']}
                else:
                    return {'m': edges.data['c'] * edges.data['u_hat']}

            self.g.register_message_func(cap_message)

            # step 2 (line 5)
            def cap_reduce(nodes):
                return {'s': th.sum(nodes.mailbox['m'], dim=1)}

            self.g.register_reduce_func(cap_reduce)

            # Execute step 1 & 2
            self.g.update_all()

            # step 3 (line 6)
            if self.batch_size:
                self.g.nodes[self.out_indx].data['v'] = squash(self.g.nodes[self.out_indx].data['s'], dim=2)
            else:
                self.g.nodes[self.out_indx].data['v'] = squash(self.g.nodes[self.out_indx].data['s'], dim=1)

            # step 4 (line 7)
            v = th.cat([self.g.nodes[self.out_indx].data['v']] * self.in_nodes, dim=0)
            if self.batch_size:
                self.g.edata['b'] = self.g.edata['b'] + (self.g.edata['u_hat'] * v).mean(dim=1).sum(dim=1, keepdim=True)
            else:
                self.g.edata['b'] = self.g.edata['b'] + (self.g.edata['u_hat'] * v).sum(dim=1, keepdim=True)

    def end(self):
        # Drop the internal graph (and its node/edge features) so it can be garbage collected.
        del self.g
        # del self.g.edata['u_hat']
        # del self.g.ndata['v']
        # del self.g.ndata['s']
        # del self.g.edata['b']


def squash(s, dim=1):
    sq = th.sum(s ** 2, dim=dim, keepdim=True)
    s_norm = th.sqrt(sq)
    s = (sq / (1.0 + sq)) * (s / s_norm)
    return s


def init_graph(in_nodes, out_nodes, f_size, device='cpu', batch_size=0):
    g = dgl.DGLGraph()
    all_nodes = in_nodes + out_nodes
    g.add_nodes(all_nodes)
    in_indx = list(range(in_nodes))
    out_indx = list(range(in_nodes, in_nodes + out_nodes))
    # add edges using edge broadcasting
    for u in in_indx:
        g.add_edges(u, out_indx)
    # init states
    if batch_size:
        g.ndata['v'] = th.zeros(all_nodes, batch_size, f_size).to(device)
    else:
        g.ndata['v'] = th.zeros(all_nodes, f_size).to(device)
    g.edata['b'] = th.zeros(in_nodes * out_nodes, 1).to(device)
    return g
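As a side note (not part of the commit), the squash function above preserves the direction of s and maps its norm to ||s||^2 / (1 + ||s||^2), which is always below 1. A small sketch, assuming DGLRoutingLayer.py is importable:

# Quick check of squash (hypothetical, not in the commit).
import torch as th
from DGLRoutingLayer import squash

s = th.tensor([[3.0, 4.0]])                 # norm 5
v = squash(s, dim=1)
print(v.norm(dim=1))                        # 25 / 26, approximately 0.9615
print(v / v.norm(dim=1, keepdim=True))      # [0.6, 0.8], same direction as s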
 import torch
 from torch import nn
-from DGLDigitCapsule import DGLDigitCapsuleLayer, squash
+from DGLDigitCapsule import DGLDigitCapsuleLayer
+from DGLRoutingLayer import squash


 class Net(nn.Module):
     ...
# Standalone example: run the routing layer by itself and track how the coupling
# coefficients evolve. DGLRoutingLayer builds the fully connected bipartite graph
# internally via init_graph, so only the sizes need to be specified here.
import torch as th
from DGLRoutingLayer import DGLRoutingLayer

in_nodes = 20
out_nodes = 10
f_size = 4
routing_layer = DGLRoutingLayer(in_nodes, out_nodes, f_size)

# Random prediction vectors u_hat, one per (input capsule, output capsule) edge.
u_hat = th.randn(in_nodes * out_nodes, f_size)

entropy_list = []
for i in range(15):
    routing_layer(u_hat)
    # Coupling coefficients c, reshaped to an [in_nodes, out_nodes] matrix.
    dist_matrix = routing_layer.g.edata['c'].view(in_nodes, out_nodes)
    entropy = (-dist_matrix * th.log(dist_matrix)).sum(dim=0)
    entropy_list.append(entropy.data.numpy())
    std = dist_matrix.std(dim=0)
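The commit log mentions adding visualization code (and removing ani.save), but the plotting itself is not shown above. A minimal sketch of how entropy_list could be plotted with matplotlib (which requirements.txt pulls in) might look like the following; the file name and plotting choices are illustrative only, not the commit's actual viz code.

# Hypothetical plotting sketch: one curve per output capsule, showing the
# entropy term computed above at each routing iteration.
import numpy as np
import matplotlib.pyplot as plt

entropy_array = np.stack(entropy_list)          # shape: [n_iterations, out_nodes]
for j in range(entropy_array.shape[1]):
    plt.plot(entropy_array[:, j])
plt.xlabel('routing iteration')
plt.ylabel('entropy term of coupling coefficients')
plt.savefig('routing_entropy.png')              # CI runs with MPLBACKEND=Agg, so save rather than show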
 #!/bin/bash
 # The working directory for this script will be "tests/scripts"

+TUTORIAL_ROOT="../../tutorials"
+
 function fail {
     echo FAIL: $@
     exit -1
 }

-export MPLBACKEND=Agg
+pushd ${TUTORIAL_ROOT} > /dev/null

-for f in $(find "../../tutorials" -name "*.py")
+# Install requirements
+pip3 install -r requirements.txt || fail "installing requirements"
+
+# Test
+export MPLBACKEND=Agg
+for f in $(find . -name "*.py")
 do
     echo "Running tutorial ${f} ..."
     python3 $f || fail "run ${f}"
 done
+
+popd > /dev/null
# requirements.txt
networkx
torch
numpy
seaborn
matplotlib
pygraphviz