Commit 36fd4ffe authored by rusty1s
Browse files

small benchmark done

parent d3975fdc
......@@ -139,77 +139,3 @@ params:
grad_norm: null
epochs: 1000
runs: 10
Reddit:
architecture:
num_layers: 4
hidden_channels: 128
dropout: 0.0
alpha: 0.1
theta: 0.5
num_parts: 24
batch_size: 12
num_workers: 0
lr: 0.01
reg_weight_decay: 0
nonreg_weight_decay: 0
grad_norm: null
epochs: 1000
runs: 1
Flickr:
architecture:
num_layers: 4
hidden_channels: 256
dropout: 0.0
alpha: 0.1
theta: 0.5
residual: false
shared_weights: true
num_parts: 24
batch_size: 12
num_workers: 0
lr: 0.01
reg_weight_decay: 0
nonreg_weight_decay: 0
grad_norm: null
epochs: 1000
runs: 1
Yelp:
architecture:
num_layers: 2
hidden_channels: 256
dropout: 0.0
alpha: 0.1
theta: 0.5
residual: false
shared_weights: true
num_parts: 40
batch_size: 10
num_workers: 0
lr: 0.01
reg_weight_decay: 0
nonreg_weight_decay: 0
grad_norm: null
epochs: 1000
runs: 1
PPI:
architecture:
num_layers: 9
hidden_channels: 2048
dropout: 0.2
alpha: 0.5
theta: 1.0
residual: true
shared_weights: false
num_parts: 12
batch_size: 1
num_workers: 0
lr: 0.001
reg_weight_decay: 0
nonreg_weight_decay: 0
grad_norm: 1.0
epochs: 3000
runs: 1
from .base import ScalableGNN
from .gcn import GCN
from .gat import GAT
# from .appnp import APPNP
# from .gcn2 import GCN2
# from .pna import PNA
# from .pna_jk import PNA_JK
from .appnp import APPNP
from .gcn2 import GCN2
from .pna import PNA
from .pna_jk import PNA_JK
__all__ = [
'ScalableGNN',
'GCN',
'GAT',
# 'APPNP',
# 'GCN2',
# 'PNA',
# 'PNA_JK',
'APPNP',
'GCN2',
'PNA',
'PNA_JK',
]
......@@ -6,15 +6,16 @@ import torch.nn.functional as F
from torch.nn import ModuleList, Linear
from torch_sparse import SparseTensor
from .base import HistoryGNN
from torch_geometric_autoscale.models import ScalableGNN
class APPNP(HistoryGNN):
class APPNP(ScalableGNN):
def __init__(self, num_nodes: int, in_channels, hidden_channels: int,
out_channels: int, num_layers: int, alpha: float,
dropout: float = 0.0, device=None, dtype=None):
dropout: float = 0.0, pool_size: Optional[int] = None,
buffer_size: Optional[int] = None, device=None):
super(APPNP, self).__init__(num_nodes, out_channels, num_layers,
device, dtype)
pool_size, buffer_size, device)
self.in_channels = in_channels
self.out_channels = out_channels
......@@ -35,50 +36,32 @@ class APPNP(HistoryGNN):
def forward(self, x: Tensor, adj_t: SparseTensor,
batch_size: Optional[int] = None,
n_id: Optional[Tensor] = None) -> Tensor:
n_id: Optional[Tensor] = None, offset: Optional[Tensor] = None,
count: Optional[Tensor] = None) -> Tensor:
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[0](x)
x = x.relu()
x = F.dropout(x, p=self.dropout, training=self.training)
x = x_0 = self.lins[1](x)
x = self.lins[1](x)
x_0 = x[:adj_t.size(0)]
for history in self.histories:
x = (1 - self.alpha) * (adj_t @ x) + self.alpha * x_0
x = self.push_and_pull(history, x, batch_size, n_id)
x = self.push_and_pull(history, x, batch_size, n_id, offset, count)
x = (1 - self.alpha) * (adj_t @ x) + self.alpha * x_0
if batch_size is not None:
x = x[:batch_size]
return x
@torch.no_grad()
def mini_inference(self, x: Tensor, loader) -> Tensor:
def forward_layer(self, layer, x, adj_t, state):
if layer == 0:
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[0](x)
x = x.relu()
x = F.dropout(x, p=self.dropout, training=self.training)
x = x_0 = self.lins[1](x)
state['x_0'] = x_0[:adj_t.size(0)]
for history in self.histories:
for info in loader:
info = info.to(self.device)
batch_size, n_id, adj_t, e_id = info
h = x[n_id]
h_0 = x_0[n_id]
h = (1 - self.alpha) * (adj_t @ h) + self.alpha * h_0
history.push_(h[:batch_size], n_id[:batch_size])
x = history.pull()
out = x.new_empty(self.num_nodes, self.out_channels)
for info in loader:
info = info.to(self.device)
batch_size, n_id, adj_t, e_id = info
h = x[n_id]
h_0 = x_0[n_id]
h = (1 - self.alpha) * (adj_t @ h) + self.alpha * h_0
out[n_id[:batch_size]] = h
return out
x = (1 - self.alpha) * (adj_t @ x) + self.alpha * state['x_0']
return x
......@@ -7,7 +7,7 @@ from torch.nn import ModuleList, Linear, BatchNorm1d
from torch_sparse import SparseTensor
from torch_geometric.nn import GCN2Conv
from scaling_gnns.models.base2 import ScalableGNN
from torch_geometric_autoscale.models import ScalableGNN
class GCN2(ScalableGNN):
......
......@@ -8,7 +8,7 @@ from torch.nn import ModuleList, Linear, BatchNorm1d
from torch_sparse import SparseTensor
from torch_geometric.nn import MessagePassing
from scaling_gnns.models.base2 import ScalableGNN
from torch_geometric_autoscale.models import ScalableGNN
EPS = 1e-5
......
......@@ -7,8 +7,8 @@ from torch.nn import (ModuleList, Linear, BatchNorm1d, Sequential, ReLU,
Identity)
from torch_sparse import SparseTensor
from scaling_gnns.models.base2 import ScalableGNN
from scaling_gnns.models.pna import PNAConv
from torch_geometric_autoscale.models import ScalableGNN
from torch_geometric_autoscale.models.pna import PNAConv
class PNA_JK(ScalableGNN):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment