import torch
import torch.nn as nn
import torch.nn.functional as F

from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import ProbAttention, AttentionLayer
from layers.Embed import DataEmbedding


class Model(nn.Module):
    """
    Informer with ProbSparse attention in O(L log L) complexity
    Paper link: https://ojs.aaai.org/index.php/AAAI/article/view/17325/17132
    """

    def __init__(self, configs):
        super(Model, self).__init__()
        self.pred_len = configs.pred_len
        self.label_len = configs.label_len

        # Under channel independence each variate is treated as a separate
        # univariate series, so the model sees one input/output channel.
        if configs.channel_independence:
            self.enc_in = 1
            self.dec_in = 1
            self.c_out = 1
        else:
            self.enc_in = configs.enc_in
            self.dec_in = configs.dec_in
            self.c_out = configs.c_out

        # Embedding: value projection plus positional and temporal embeddings
        self.enc_embedding = DataEmbedding(self.enc_in, configs.d_model, configs.embed, configs.freq,
                                           configs.dropout)
        self.dec_embedding = DataEmbedding(self.dec_in, configs.d_model, configs.embed, configs.freq,
                                           configs.dropout)

        # Encoder: stacked ProbSparse self-attention layers, optionally
        # interleaved with distilling conv layers that halve the sequence length.
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        ProbAttention(False, configs.factor, attention_dropout=configs.dropout,
                                      output_attention=configs.output_attention),
                        configs.d_model, configs.n_heads),
                    configs.d_model,
                    configs.d_ff,
                    dropout=configs.dropout,
                    activation=configs.activation
                ) for l in range(configs.e_layers)
            ],
            [
                ConvLayer(
                    configs.d_model
                ) for l in range(configs.e_layers - 1)
            ] if configs.distil else None,
            norm_layer=torch.nn.LayerNorm(configs.d_model)
        )
        # Decoder: masked ProbSparse self-attention, cross-attention over the
        # encoder output, and a linear projection back to the output channels.
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(
                        ProbAttention(True, configs.factor, attention_dropout=configs.dropout,
                                      output_attention=False),
                        configs.d_model, configs.n_heads),
                    AttentionLayer(
                        ProbAttention(False, configs.factor, attention_dropout=configs.dropout,
                                      output_attention=False),
                        configs.d_model, configs.n_heads),
                    configs.d_model,
                    configs.d_ff,
                    dropout=configs.dropout,
                    activation=configs.activation,
                )
                for l in range(configs.d_layers)
            ],
            norm_layer=torch.nn.LayerNorm(configs.d_model),
            # Use self.c_out so the projection stays consistent with the
            # channel-independence setting above.
            projection=nn.Linear(configs.d_model, self.c_out, bias=True)
        )

    def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        enc_out = self.enc_embedding(x_enc, x_mark_enc)
        dec_out = self.dec_embedding(x_dec, x_mark_dec)

        enc_out, attns = self.encoder(enc_out, attn_mask=None)
        dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None)

        return dec_out  # [B, L, D]

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        dec_out = self.long_forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)
        return dec_out[:, -self.pred_len:, :]  # [B, pred_len, D]
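

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the original module. It assumes the
    # `layers` package from this repository is importable and that the config
    # object exposes exactly the attributes read in __init__ above. All field
    # values below (lengths, d_model, etc.) are illustrative assumptions.
    from types import SimpleNamespace

    configs = SimpleNamespace(
        pred_len=24, label_len=48, channel_independence=0,
        enc_in=7, dec_in=7, c_out=7,
        d_model=64, n_heads=4, e_layers=2, d_layers=1, d_ff=128,
        factor=3, dropout=0.1, embed='timeF', freq='h',
        activation='gelu', distil=True, output_attention=False,
    )
    model = Model(configs)

    batch, seq_len = 2, 96
    # With embed='timeF' and freq='h', the temporal marks carry 4 features per step.
    x_enc = torch.randn(batch, seq_len, configs.enc_in)
    x_mark_enc = torch.randn(batch, seq_len, 4)
    # Decoder input covers label_len known steps plus pred_len placeholder steps.
    x_dec = torch.randn(batch, configs.label_len + configs.pred_len, configs.dec_in)
    x_mark_dec = torch.randn(batch, configs.label_len + configs.pred_len, 4)

    out = model(x_enc, x_mark_enc, x_dec, x_mark_dec)
    print(out.shape)  # expected: [batch, pred_len, c_out]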