# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/models.mlp.ipynb.
# %% auto 0
__all__ = ['MLP']
# %% ../../nbs/models.mlp.ipynb 5
from typing import Optional
import torch
import torch.nn as nn
from ..losses.pytorch import MAE
from ..common._base_windows import BaseWindows
# %% ../../nbs/models.mlp.ipynb 6
class MLP(BaseWindows):
"""MLP
Simple Multi Layer Perceptron architecture (MLP).
This deep neural network has a constant number of units through its layers, each with
ReLU non-linearities, and it is trained using Adam stochastic gradient descent.
The network accepts static, historic and future exogenous data, flattens
the inputs and learns fully connected relationships against the target variable.
**Parameters:**
`h`: int, forecast horizon.
`input_size`: int, considered autoregressive inputs (lags), y=[1,2,3,4] input_size=2 -> lags=[1,2].
`stat_exog_list`: str list, static exogenous columns.
`hist_exog_list`: str list, historic exogenous columns.
`futr_exog_list`: str list, future exogenous columns.
`exclude_insample_y`: bool=False, the model skips the autoregressive features y[t-input_size:t] if True.
`num_layers`: int=2, number of layers for the MLP.
`hidden_size`: int=1024, number of units for each layer of the MLP.
`loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
`valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).
`max_steps`: int=1000, maximum number of training steps.
`learning_rate`: float=1e-3, Learning rate between (0, 1).
`num_lr_decays`: int=-1, Number of learning rate decays, evenly distributed across max_steps.
`early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.
`val_check_steps`: int=100, Number of training steps between every validation loss check.
`batch_size`: int=32, number of different series in each batch.
`valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.
`windows_batch_size`: int=1024, number of windows to sample in each training batch.
`inference_windows_batch_size`: int=-1, number of windows to sample in each inference batch, -1 uses all.
`start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.
`step_size`: int=1, step size between each window of temporal data.
`scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).
`random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
`num_workers_loader`: int=0, workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`optimizer`: Subclass of `torch.optim.Optimizer`, optional, user specified optimizer instead of the default choice (Adam).
`optimizer_kwargs`: dict, optional, keyword arguments used by the user specified `optimizer`.
`**trainer_kwargs`: keyword trainer arguments inherited from [PyTorch Lightning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
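**Usage example** (a minimal sketch, assuming the standard `NeuralForecast` fit/predict
workflow and the `AirPassengersDF` sample dataset from `neuralforecast.utils`):
```python
from neuralforecast import NeuralForecast
from neuralforecast.models import MLP
from neuralforecast.utils import AirPassengersDF

# Hypothetical hyperparameters, chosen only for illustration.
model = MLP(h=12, input_size=24, max_steps=100)
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()
```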
"""
# Class attributes
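# SAMPLING_TYPE is read by the neuralforecast core to select windows-based sampling.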
SAMPLING_TYPE = "windows"
def __init__(
self,
h,
input_size,
futr_exog_list=None,
hist_exog_list=None,
stat_exog_list=None,
exclude_insample_y=False,
num_layers=2,
hidden_size=1024,
loss=MAE(),
valid_loss=None,
max_steps: int = 1000,
learning_rate: float = 1e-3,
num_lr_decays: int = -1,
early_stop_patience_steps: int = -1,
val_check_steps: int = 100,
batch_size: int = 32,
valid_batch_size: Optional[int] = None,
windows_batch_size=1024,
inference_windows_batch_size=-1,
start_padding_enabled=False,
step_size: int = 1,
scaler_type: str = "identity",
random_seed: int = 1,
num_workers_loader: int = 0,
drop_last_loader: bool = False,
optimizer=None,
optimizer_kwargs=None,
**trainer_kwargs
):
# Inherit BaseWindows class
super(MLP, self).__init__(
h=h,
input_size=input_size,
futr_exog_list=futr_exog_list,
hist_exog_list=hist_exog_list,
stat_exog_list=stat_exog_list,
exclude_insample_y=exclude_insample_y,
loss=loss,
valid_loss=valid_loss,
max_steps=max_steps,
learning_rate=learning_rate,
num_lr_decays=num_lr_decays,
early_stop_patience_steps=early_stop_patience_steps,
val_check_steps=val_check_steps,
batch_size=batch_size,
valid_batch_size=valid_batch_size,
windows_batch_size=windows_batch_size,
inference_windows_batch_size=inference_windows_batch_size,
start_padding_enabled=start_padding_enabled,
step_size=step_size,
scaler_type=scaler_type,
num_workers_loader=num_workers_loader,
drop_last_loader=drop_last_loader,
random_seed=random_seed,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
**trainer_kwargs
)
# Architecture
self.num_layers = num_layers
self.hidden_size = hidden_size
self.futr_input_size = len(self.futr_exog_list)
self.hist_input_size = len(self.hist_exog_list)
self.stat_input_size = len(self.stat_exog_list)
input_size_first_layer = (
input_size
+ self.hist_input_size * input_size
+ self.futr_input_size * (input_size + h)
+ self.stat_input_size
)
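# Example (hypothetical sizes): input_size=24, h=12 with 2 historic, 1 future and
# 3 static exogenous features gives 24 + 2*24 + 1*(24+12) + 3 = 111 input units.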
# MultiLayer Perceptron
layers = [
nn.Linear(in_features=input_size_first_layer, out_features=hidden_size)
]
for _ in range(num_layers - 1):
layers += [nn.Linear(in_features=hidden_size, out_features=hidden_size)]
self.mlp = nn.ModuleList(layers)
# Adapter with Loss dependent dimensions
self.out = nn.Linear(
in_features=hidden_size, out_features=h * self.loss.outputsize_multiplier
)
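# outputsize_multiplier depends on the loss: point losses such as MAE emit one
# value per step, probabilistic losses emit one value per distribution parameter.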
def forward(self, windows_batch):
# Parse windows_batch
insample_y = windows_batch["insample_y"]
futr_exog = windows_batch["futr_exog"]
hist_exog = windows_batch["hist_exog"]
stat_exog = windows_batch["stat_exog"]
# Flatten MLP inputs [B, L+H, C] -> [B, (L+H)*C]
# Concatenate [ Y_t | X_{t-L},...,X_{t} | F_{t-L},...,F_{t+H} | S ]
batch_size = len(insample_y)
if self.hist_input_size > 0:
insample_y = torch.cat(
(insample_y, hist_exog.reshape(batch_size, -1)), dim=1
)
if self.futr_input_size > 0:
insample_y = torch.cat(
(insample_y, futr_exog.reshape(batch_size, -1)), dim=1
)
if self.stat_input_size > 0:
insample_y = torch.cat(
(insample_y, stat_exog.reshape(batch_size, -1)), dim=1
)
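# insample_y is now the flattened feature vector of size input_size_first_layer:
# [B, input_size + hist*input_size + futr*(input_size+h) + stat]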
y_pred = insample_y.clone()
for layer in self.mlp:
y_pred = torch.relu(layer(y_pred))
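# Adapt the last hidden state to h * outputsize_multiplier outputs; the loss's
# domain_map then applies its output transformation (e.g. point losses squeeze the last dimension).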
y_pred = self.out(y_pred)
y_pred = y_pred.reshape(batch_size, self.h, self.loss.outputsize_multiplier)
y_pred = self.loss.domain_map(y_pred)
return y_pred