model.py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from dgl.nn.pytorch import GraphConv
from dgl.nn.pytorch.conv import ChebConv


class TemporalConvLayer(nn.Module):
    """Temporal convolution layer.

15
16
17
18
19
20
21
22
    arguments
    ---------
    c_in : int
        The number of input channels (features)
    c_out : int
        The number of output channels (features)
    dia : int
        The dilation size
    """

    def __init__(self, c_in, c_out, dia=1):
        super(TemporalConvLayer, self).__init__()
        self.c_out = c_out
        self.c_in = c_in
        # A (2, 1) kernel with dilation ``dia`` convolves along the time
        # axis only, shrinking it by ``dia`` steps (no padding is applied).
        self.conv = nn.Conv2d(
            c_in, c_out, (2, 1), 1, dilation=dia, padding=(0, 0)
        )

    def forward(self, x):
        return torch.relu(self.conv(x))
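
# Shape sketch for TemporalConvLayer (sizes below are illustrative, not from
# this file): the dilated (2, 1) kernel eats ``dia`` time steps and leaves
# the node axis untouched.
#
#   >>> layer = TemporalConvLayer(c_in=1, c_out=16, dia=2)
#   >>> x = torch.randn(8, 1, 12, 207)  # (batch, c_in, T, n_nodes)
#   >>> layer(x).shape
#   torch.Size([8, 16, 10, 207])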


class SpatioConvLayer(nn.Module):
    def __init__(self, c, Lk):
        # c: hidden dimension; Lk: the DGLGraph to convolve over (the name
        # is a holdover from the Chebyshev graph-matrix formulation).
        super(SpatioConvLayer, self).__init__()
        self.g = Lk
        self.gc = GraphConv(c, c, activation=F.relu)
        # self.gc = ChebConv(c, c, 3)

    def init(self):
        # Re-initialize the GraphConv weight uniformly at 1/sqrt(c) scale.
        stdv = 1.0 / math.sqrt(self.gc.weight.size(1))
        self.gc.weight.data.uniform_(-stdv, stdv)

    def forward(self, x):
        # (batch, c, T, n) -> (n, batch, T, c): GraphConv wants the node
        # axis first and the feature axis last.
        x = x.transpose(0, 3)
        x = x.transpose(1, 3)
        output = self.gc(self.g, x)
        # Transpose back to (batch, c, T, n).
        output = output.transpose(1, 3)
        output = output.transpose(0, 3)
        return torch.relu(output)
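
# Usage sketch for SpatioConvLayer (the random graph below is a stand-in for
# a real adjacency; GraphConv rejects 0-in-degree nodes, hence the
# self-loops):
#
#   >>> import dgl
#   >>> g = dgl.add_self_loop(dgl.rand_graph(207, 1500))
#   >>> sc = SpatioConvLayer(c=16, Lk=g)
#   >>> x = torch.randn(8, 16, 10, 207)  # (batch, c, T, n_nodes)
#   >>> sc(x).shape
#   torch.Size([8, 16, 10, 207])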


class FullyConvLayer(nn.Module):
    def __init__(self, c):
        super(FullyConvLayer, self).__init__()
        self.conv = nn.Conv2d(c, 1, 1)

    def forward(self, x):
        return self.conv(x)
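
# FullyConvLayer is a 1x1 convolution acting as a per-node, per-timestep
# linear readout over channels: (batch, c, T, n) -> (batch, 1, T, n).
# Illustrative shapes:
#
#   >>> FullyConvLayer(16)(torch.randn(8, 16, 1, 207)).shape
#   torch.Size([8, 1, 1, 207])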


class OutputLayer(nn.Module):
    def __init__(self, c, T, n):
        super(OutputLayer, self).__init__()
        # Fold the remaining T time steps into one with a (T, 1) kernel.
        self.tconv1 = nn.Conv2d(c, c, (T, 1), 1, dilation=1, padding=(0, 0))
        self.ln = nn.LayerNorm([n, c])
        self.tconv2 = nn.Conv2d(c, c, (1, 1), 1, dilation=1, padding=(0, 0))
        self.fc = FullyConvLayer(c)

    def forward(self, x):
        x_t1 = self.tconv1(x)
        # LayerNorm acts over the (n, c) axes, so move them to the end
        # and back.
        x_ln = self.ln(x_t1.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        x_t2 = self.tconv2(x_ln)
        return self.fc(x_t2)
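
# Shape sketch for OutputLayer (illustrative sizes): the (T, 1) kernel folds
# the T remaining steps into one, and the 1x1 readout gives one value per
# node.
#
#   >>> out = OutputLayer(c=16, T=10, n=207)
#   >>> out(torch.randn(8, 16, 10, 207)).shape
#   torch.Size([8, 1, 1, 207])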


class STGCN_WAVE(nn.Module):
    def __init__(
        self, c, T, n, Lk, p, num_layers, device, control_str="TNTSTNTST"
    ):
        # ``p`` (dropout) and ``num_layers`` are accepted but unused here;
        # the actual depth is taken from ``control_str``.
        super(STGCN_WAVE, self).__init__()
        self.control_str = control_str  # model structure controller
        self.num_layers = len(control_str)
        self.layers = nn.ModuleList([])
        cnt = 0  # index into the channel list c: len(c) == (#"T" layers) + 1
        diapower = 0  # the k-th temporal layer uses dilation 2**k
        for i in range(self.num_layers):
            i_layer = control_str[i]
            if i_layer == "T":  # Temporal Layer
                self.layers.append(
                    TemporalConvLayer(c[cnt], c[cnt + 1], dia=2**diapower)
                )
                diapower += 1
                cnt += 1
            if i_layer == "S":  # Spatio Layer
                self.layers.append(SpatioConvLayer(c[cnt], Lk))
            if i_layer == "N":  # Norm Layer
                self.layers.append(nn.LayerNorm([n, c[cnt]]))
        # The T-layers shrink the window by sum_k 2**k = 2**diapower - 1.
        self.output = OutputLayer(c[cnt], T + 1 - 2 ** (diapower), n)
        # nn.Module.to() moves parameters in place; no reassignment needed.
        for layer in self.layers:
            layer.to(device)

    def forward(self, x):
        for i in range(self.num_layers):
            i_layer = self.control_str[i]
            if i_layer == "N":
                # LayerNorm normalizes over the trailing (n, c) axes.
                x = self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
            else:
                x = self.layers[i](x)
        return self.output(x)
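

if __name__ == "__main__":
    # Smoke test with made-up sizes: the channel list, window length T=144,
    # and the random 207-node graph are illustrative assumptions, not values
    # from this file.
    import dgl

    dev = torch.device("cpu")
    g = dgl.add_self_loop(dgl.rand_graph(207, 1500))
    channels = [1, 16, 32, 64, 32, 128]  # one entry per "T" layer, plus input
    model = STGCN_WAVE(
        channels, 144, 207, g, 0.0, 9, dev, control_str="TNTSTNTST"
    )
    x = torch.randn(8, 1, 144, 207)  # (batch, features, window, n_nodes)
    print(model(x).shape)  # torch.Size([8, 1, 1, 207])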