{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "14ce00e2", "metadata": {}, "outputs": [], "source": [ "#| default_exp auto" ] }, { "cell_type": "code", "execution_count": null, "id": "57869174", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "%load_ext autoreload\n", "%autoreload 2" ] }, { "cell_type": "code", "execution_count": null, "id": "e1167bbe", "metadata": {}, "outputs": [], "source": [ "#| export\n", "from os import cpu_count\n", "import torch\n", "\n", "from ray import tune\n", "from ray.tune.search.basic_variant import BasicVariantGenerator\n", "\n", "from neuralforecast.common._base_auto import BaseAuto\n", "from neuralforecast.common._base_auto import MockTrial\n", "\n", "from neuralforecast.models.rnn import RNN\n", "from neuralforecast.models.gru import GRU\n", "from neuralforecast.models.tcn import TCN\n", "from neuralforecast.models.lstm import LSTM\n", "from neuralforecast.models.deepar import DeepAR\n", "from neuralforecast.models.dilated_rnn import DilatedRNN\n", "from neuralforecast.models.bitcn import BiTCN\n", "\n", "from neuralforecast.models.mlp import MLP\n", "from neuralforecast.models.nbeats import NBEATS\n", "from neuralforecast.models.nbeatsx import NBEATSx\n", "from neuralforecast.models.nhits import NHITS\n", "from neuralforecast.models.dlinear import DLinear\n", "from neuralforecast.models.nlinear import NLinear\n", "\n", "from neuralforecast.models.tft import TFT\n", "from neuralforecast.models.vanillatransformer import VanillaTransformer\n", "from neuralforecast.models.informer import Informer\n", "from neuralforecast.models.autoformer import Autoformer\n", "from neuralforecast.models.fedformer import FEDformer\n", "from neuralforecast.models.patchtst import PatchTST\n", "from neuralforecast.models.timesnet import TimesNet\n", "from neuralforecast.models.itransformer import iTransformer\n", "\n", "from neuralforecast.models.stemgnn import StemGNN\n", "from neuralforecast.models.hint import HINT\n", "from neuralforecast.models.tsmixer import TSMixer\n", "from neuralforecast.models.tsmixerx import TSMixerx\n", "from neuralforecast.models.mlpmultivariate import MLPMultivariate\n", "\n", "from neuralforecast.losses.pytorch import MAE, MQLoss, DistributionLoss" ] }, { "cell_type": "code", "execution_count": null, "id": "495dd890", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "import matplotlib.pyplot as plt\n", "\n", "from fastcore.test import test_eq\n", "from nbdev.showdoc import show_doc\n", "\n", "import logging\n", "import warnings\n", "import inspect\n", "\n", "from neuralforecast.losses.pytorch import MSE" ] }, { "cell_type": "code", "execution_count": null, "id": "490b7416", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "plt.rcParams[\"axes.grid\"]=True\n", "plt.rcParams['font.family'] = 'serif'\n", "plt.rcParams[\"figure.figsize\"] = (6,4)" ] }, { "cell_type": "code", "execution_count": null, "id": "bac3f877", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto class.\n", "\n", "# Patch for Python 3.11 on get arg spec\n", "if not hasattr(inspect, 'getargspec'):\n", " getargspec = inspect.getfullargspec\n", "else:\n", " getargspec = inspect.getargspec\n", "\n", "def test_args(auto_model, exclude_args=None):\n", " base_auto_args = getargspec(BaseAuto)[0]\n", " auto_model_args = 
getargspec(auto_model)[0]\n", " if exclude_args is not None:\n", " base_auto_args = [arg for arg in base_auto_args if arg not in exclude_args]\n", " args_diff = set(base_auto_args) - set(auto_model_args)\n", " assert not args_diff, f\"__init__ of {auto_model.__name__} does not contain the following required arguments from the BaseAuto class:\\n\\t\\t{args_diff}\"" ] }, { "attachments": {}, "cell_type": "markdown", "id": "7ae65ca7", "metadata": {}, "source": [ "# Models\n", "\n", "> NeuralForecast contains user-friendly implementations of neural forecasting models that make it easy to switch computation between CPU and GPU, parallelize it, and tune hyperparameters." ] }, { "cell_type": "markdown", "id": "bc0437d9-7e44-4e6d-bf6b-43c2e9e338f1", "metadata": {}, "source": [ "All the NeuralForecast models are \"global\": we train them with all the series in the input pd.DataFrame `Y_df`. For now, the optimization objective is \"univariate\", since it does not consider interactions between the output predictions across time series. Like the StatsForecast library, `core.NeuralForecast` allows you to explore collections of models efficiently, and contains functions for convenient wrangling of the input and output pd.DataFrames." ] }, { "cell_type": "markdown", "id": "cbf7bd22", "metadata": {}, "source": [ "First, we load the AirPassengers dataset so that you can run all the examples." ] }, { "cell_type": "code", "execution_count": null, "id": "b67d291b", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "\n", "from neuralforecast.tsdataset import TimeSeriesDataset\n", "from neuralforecast.utils import AirPassengersDF as Y_df" ] }, { "cell_type": "code", "execution_count": null, "id": "95293f37", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Split train/test and declare time series dataset\n", "Y_train_df = Y_df[Y_df.ds<='1959-12-31'] # 132 train\n", "Y_test_df = Y_df[Y_df.ds>'1959-12-31'] # 12 test\n", "dataset, *_ = TimeSeriesDataset.from_df(Y_train_df)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "5b8183a9", "metadata": {}, "source": [ "# 1. Automatic Forecasting" ] }, { "attachments": {}, "cell_type": "markdown", "id": "858e6a1b", "metadata": {}, "source": [ "## A. RNN-Based" ] }, { "cell_type": "code", "execution_count": null, "id": "60458256", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoRNN(BaseAuto):\n", " \n", " default_config = {\n", " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20)\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None,\n", " ):\n", " \"\"\" Auto RNN\n", " \n", " **Parameters:**
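<br>\n", " `h`: int, forecast horizon.<br>\n", " `loss`: PyTorch module, instantiated train loss class from the losses collection.<br>\n", " `valid_loss`: PyTorch module, instantiated validation loss class; if None, the train loss is reused.<br>\n", " `config`: dict or callable, dictionary with a ray.tune search space or a function that takes an optuna trial and returns a configuration dict; if None, `get_default_config` is used.<br>\n", " `search_alg`: ray.tune.search variant or optuna.sampler, searcher for the hyperparameter space.<br>\n", " `num_samples`: int, number of hyperparameter optimization steps/samples.<br>\n", " `refit_with_val`: bool, whether the refit of the best model should preserve the validation size.<br>\n", " `cpus`: int, number of cpus to use during optimization, defaults to all available.<br>\n", " `gpus`: int, number of gpus to use during optimization, defaults to all available.<br>\n", " `verbose`: bool, whether to print partial outputs.<br>\n", " `alias`: str, optional custom name of the model.<br>\n", " `backend`: str, backend to use for searching the hyperparameter space, either 'ray' or 'optuna'.<br>\n", " `callbacks`: list of callable, optional, list of functions to call during the optimization process.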
\n", " \n", " \"\"\"\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoRNN, self).__init__(\n", " cls_model=RNN, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config, \n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['inference_input_size'] = tune.choice([h*x \\\n", " for x in config['inference_input_size_multiplier']])\n", " del config['input_size_multiplier'], config['inference_input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "142f7028", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoRNN, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "ce08be9c", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoRNN.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoRNN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoRNN(h=12, config=None, num_samples=1, cpus=1, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "6ad79ef9", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoRNN, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoRNN.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': -1, 'encoder_hidden_size': 8})\n", " return config\n", "\n", "model = AutoRNN(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoRNN.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = -1\n", "my_config['encoder_hidden_size'] = 8\n", "model = AutoRNN(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "821e7999", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoLSTM(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 
1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20)\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", "\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoLSTM, self).__init__(\n", " cls_model=LSTM,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['inference_input_size'] = tune.choice([h*x \\\n", " for x in config['inference_input_size_multiplier']])\n", " del config['input_size_multiplier'], config['inference_input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "c1d3965f", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoLSTM, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "9c68f5a0", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoLSTM.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoLSTM(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoLSTM(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "b71ca902", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoLSTM, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoLSTM.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': -1, 'encoder_hidden_size': 8})\n", " return config\n", "\n", "model = AutoLSTM(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoLSTM.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = -1\n", "my_config['encoder_hidden_size'] = 8\n", "model = AutoLSTM(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "1dcaf5a5", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class 
AutoGRU(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", " \"encoder_n_layers\": tune.randint(1, 4),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20)\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoGRU, self).__init__(\n", " cls_model=GRU,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config, \n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['inference_input_size'] = tune.choice([h*x \\\n", " for x in config['inference_input_size_multiplier']])\n", " del config['input_size_multiplier'], config['inference_input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "b078228a", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoGRU, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "caf08e56", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoGRU.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoGRU(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoGRU(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "f53b50c6", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoGRU, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoGRU.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': -1, 'encoder_hidden_size': 8})\n", " return config\n", "\n", "model = AutoGRU(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default 
config\n", "my_config = AutoGRU.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = -1\n", "my_config['encoder_hidden_size'] = 8\n", "model = AutoGRU(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "c5d5ed05", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoTCN(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", " \"decoder_hidden_size\": tune.choice([64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20)\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoTCN, self).__init__(\n", " cls_model=TCN,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['inference_input_size'] = tune.choice([h*x \\\n", " for x in config['inference_input_size_multiplier']])\n", " del config['input_size_multiplier'], config['inference_input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "3fc5d09e", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoTCN, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "fb7032bd", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoTCN.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoTCN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoTCN(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "cda47eae", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoTCN, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = 
AutoTCN.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': -1, 'encoder_hidden_size': 8})\n", " return config\n", "\n", "model = AutoTCN(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoTCN.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = -1\n", "my_config['encoder_hidden_size'] = 8\n", "model = AutoTCN(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "b04f4d4c", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoDeepAR(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"lstm_hidden_size\": tune.choice([32, 64, 128, 256]),\n", " \"lstm_n_layers\": tune.randint(1, 4),\n", " \"lstm_dropout\": tune.uniform(0.0, 0.5),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice(['robust', 'minmax1']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=DistributionLoss(distribution='StudentT', level=[80, 90], return_params=False),\n", " valid_loss=MQLoss(level=[80, 90]),\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoDeepAR, self).__init__(\n", " cls_model=DeepAR, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "132c8547", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoDeepAR, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "61c8a07b", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoDeepAR.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, lstm_hidden_size=8)\n", "model = AutoDeepAR(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model 
= AutoDeepAR(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "f5aec4e4", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoDeepAR, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoDeepAR.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'lstm_hidden_size': 8})\n", " return config\n", "\n", "model = AutoDeepAR(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoDeepAR.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['lstm_hidden_size'] = 8\n", "model = AutoDeepAR(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "42b60f73", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoDilatedRNN(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [-1, 4, 16, 64],\n", " \"inference_input_size_multiplier\": [-1],\n", " \"h\": None,\n", " \"cell_type\": tune.choice(['LSTM', 'GRU']),\n", " \"encoder_hidden_size\": tune.choice([50, 100, 200, 300]),\n", " \"dilations\": tune.choice([ [[1, 2], [4, 8]], [[1, 2, 4, 8]] ]),\n", " \"context_size\": tune.choice([5, 10, 50]),\n", " \"decoder_hidden_size\": tune.choice([64, 128, 256, 512]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([16, 32]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20)\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoDilatedRNN, self).__init__(\n", " cls_model=DilatedRNN,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", " \n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['inference_input_size'] = tune.choice([h*x \\\n", " for x in config['inference_input_size_multiplier']])\n", " del config['input_size_multiplier'], config['inference_input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": 
null, "id": "d132351d", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoDilatedRNN, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "533eb1da", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoDilatedRNN.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "model = AutoDilatedRNN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoDilatedRNN(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "0efe303d", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoDilatedRNN, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoDilatedRNN.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': -1, 'encoder_hidden_size': 8})\n", " return config\n", "\n", "model = AutoDilatedRNN(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoDilatedRNN.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = -1\n", "my_config['encoder_hidden_size'] = 8\n", "model = AutoDilatedRNN(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "7a0616ae", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoBiTCN(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([16, 32]),\n", " \"dropout\": tune.uniform(0.0, 0.99), \n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoBiTCN, self).__init__(\n", " cls_model=BiTCN, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, 
n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "433d2ef6", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoBiTCN, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "95850f3c", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoBiTCN.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoBiTCN(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoBiTCN(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "7c905530", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoBiTCN, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoBiTCN.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoBiTCN(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoBiTCN.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoBiTCN(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, 
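{ "cell_type": "markdown", "id": "0a1b2c3d", "metadata": {}, "source": [ "The `Auto*` models above were fitted directly on a `TimeSeriesDataset`. In practice you will usually reach them through `core.NeuralForecast`, which handles the pd.DataFrame wrangling mentioned in the introduction. The following cell is a minimal sketch of that workflow (the tiny `fast_config` budget is only there to keep the example fast, and the variable names are illustrative):" ] }, { "cell_type": "code", "execution_count": null, "id": "1f2e3d4c", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Minimal sketch: hyperparameter search and forecasting through core.NeuralForecast\n", "from neuralforecast import NeuralForecast\n", "\n", "fast_config = dict(max_steps=2, val_check_steps=1, input_size=-1, encoder_hidden_size=8)\n", "nf = NeuralForecast(models=[AutoRNN(h=12, config=fast_config, num_samples=1, cpus=1)], freq='M')\n", "nf.fit(df=Y_train_df)\n", "fcst_df = nf.predict()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "7feff99a", "metadata": {}, "source": [ "## B. 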
MLP-Based" ] }, { "cell_type": "code", "execution_count": null, "id": "fa01d378", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoMLP(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice( [256, 512, 1024] ),\n", " \"num_layers\": tune.randint(2, 6),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None, \n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", "\n", " # Define search space, input/output sizes \n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoMLP, self).__init__(\n", " cls_model=MLP,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config, \n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "a4c8ecf5", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoMLP, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "c637d57f", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoMLP.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoMLP(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoMLP(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "7d6b5e08", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoMLP, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoMLP.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoMLP(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config 
= AutoMLP.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoMLP(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "1ed4c88b", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoNBEATS(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes \n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoNBEATS, self).__init__(\n", " cls_model=NBEATS, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "ef824625", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoNBEATS, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "c0f0fe9f", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNBEATS.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12,\n", " mlp_units=3*[[8, 8]])\n", "model = AutoNBEATS(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoNBEATS(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "7afac66d", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoNBEATS, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoNBEATS.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12, 'mlp_units': 3 * 
[[8, 8]]})\n", " return config\n", "\n", "model = AutoNBEATS(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoNBEATS.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['mlp_units'] = 3 * [[8, 8]]\n", "model = AutoNBEATS(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "753c5300", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoNBEATSx(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoNBEATSx, self).__init__(\n", " cls_model=NBEATSx,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "a2829115", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoNBEATSx, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "dca94b63", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNBEATSx.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12,\n", " mlp_units=3*[[8, 8]])\n", "model = AutoNBEATSx(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoNBEATSx(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "76334984", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoNBEATSx, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: 
Optuna with updated default config\n", "my_config = AutoNBEATSx.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12, 'mlp_units': 3 * [[8, 8]]})\n", " return config\n", "\n", "model = AutoNBEATSx(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoNBEATSx.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['mlp_units'] = 3 * [[8, 8]]\n", "model = AutoNBEATSx(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "79a2ede0", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoNHITS(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"n_pool_kernel_size\": tune.choice([[2, 2, 1], 3*[1], 3*[2], 3*[4], \n", " [8, 4, 1], [16, 8, 1]]),\n", " \"n_freq_downsample\": tune.choice([[168, 24, 1], [24, 12, 1], \n", " [180, 60, 1], [60, 8, 1], \n", " [40, 20, 1], [1, 1, 1]]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.quniform(lower=500, upper=1500, q=100),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(lower=1, upper=20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None,\n", " ):\n", "\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoNHITS, self).__init__(\n", " cls_model=NHITS, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "fd1236fd", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoNHITS, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "cc67276b-6005-4f67-969d-36a90130e0f6", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12, \n", " mlp_units=3 * [[8, 8]])\n", "model = AutoNHITS(h=12, config=config, num_samples=1, 
cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoNHITS(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "aa9c4e5c", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoNHITS, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoNHITS.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12, 'mlp_units': 3 * [[8, 8]]})\n", " return config\n", "\n", "model = AutoNHITS(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoNHITS.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['mlp_units'] = 3 * [[8, 8]]\n", "model = AutoNHITS(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "4b5616d7", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoDLinear(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"moving_avg_window\": tune.choice([11, 25, 51]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.quniform(lower=500, upper=1500, q=100),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(lower=1, upper=20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None,\n", " ):\n", "\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoDLinear, self).__init__(\n", " cls_model=DLinear, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "04a1aab7", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoDLinear, 
title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "cc8b0d4b", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoDLinear.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", "model = AutoDLinear(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoDLinear(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "fcf93b8a", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoDLinear, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoDLinear.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoDLinear(h=12, config=my_config_new, backend='optuna', cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoDLinear.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoDLinear(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "180a5a10", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoNLinear(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.quniform(lower=500, upper=1500, q=100),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(lower=1, upper=20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None,\n", " ):\n", "\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoNLinear, self).__init__(\n", " cls_model=NLinear, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples,\n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = 
cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "6e414458", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoNLinear, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "80759148", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNLinear.default_config\n", "config = dict(max_steps=2, val_check_steps=1, input_size=12)\n", "model = AutoNLinear(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoNLinear(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "a36677a5", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoNLinear, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoNLinear.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoNLinear(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoNLinear.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoNLinear(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "fd705a56", "metadata": {}, "source": [ "## C. 
Transformer-Based" ] }, { "cell_type": "code", "execution_count": null, "id": "91cd9b23", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoTFT(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"n_head\": tune.choice([4, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoTFT, self).__init__(\n", " cls_model=TFT, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "c1ab1077", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoTFT, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "e283e96f", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoTFT(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoTFT(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "4a78492a", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoTFT, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoTFT.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoTFT(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", 
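"# Note: Ray search spaces are plain dicts, so entries are overridden by direct\n", "# assignment below; the Optuna config above is a callable and is wrapped instead.\n",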
"my_config = AutoTFT.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoTFT(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "28b5ea6a", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoVanillaTransformer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"n_head\": tune.choice([4, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoVanillaTransformer, self).__init__(\n", " cls_model=VanillaTransformer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "265a8197", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoVanillaTransformer, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "b3f3e429", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoVanillaTransformer(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoVanillaTransformer(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "8fb6a1ae", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoVanillaTransformer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = 
AutoVanillaTransformer.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoVanillaTransformer(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoVanillaTransformer.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoVanillaTransformer(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "e9d509ec", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoInformer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"n_head\": tune.choice([4, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoInformer, self).__init__(\n", " cls_model=Informer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "f3c43869", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoInformer, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "1648c77b", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoInformer.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoInformer(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoInformer(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", 
"execution_count": null, "id": "c70e2e8a", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoInformer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoInformer.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoInformer(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoInformer.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoInformer(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "d409d246", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoAutoformer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"n_head\": tune.choice([4, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoAutoformer, self).__init__(\n", " cls_model=Autoformer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "38e34860", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoAutoformer, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "38d4b216", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", "config 
= dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=8)\n", "model = AutoAutoformer(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoAutoformer(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "ee9e62a7", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoAutoformer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoAutoformer.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 8})\n", " return config\n", "\n", "model = AutoAutoformer(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoAutoformer.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 8\n", "model = AutoAutoformer(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "b7c84a19", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoFEDformer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes \n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoFEDformer, self).__init__(\n", " cls_model=FEDformer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { 
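"cell_type": "markdown", "id": "7f2a9c41", "metadata": {}, "source": [ "Each `Auto*` class above follows the same `get_default_config(h, backend)` pattern: it copies the class-level `default_config`, expands `input_size_multiplier` into an `input_size` search over multiples of the horizon `h`, adds a `step_size` choice between 1 and `h`, and, for `backend='optuna'`, converts the Ray search space into an Optuna-compatible function with `_ray_config_to_optuna`." ] }, {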
"cell_type": "code", "execution_count": null, "id": "45497479", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoFEDformer, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "0b7b1ccf", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoNHITS.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=64)\n", "model = AutoFEDformer(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoFEDformer(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "d8b8a02d", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoFEDformer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoFEDformer.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 64})\n", " return config\n", "\n", "model = AutoFEDformer(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoFEDformer.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 64\n", "model = AutoFEDformer(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "ddb3cde6", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoPatchTST(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([16, 128, 256]),\n", " \"n_heads\": tune.choice([4, 16]),\n", " \"patch_len\": tune.choice([16, 24]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"revin\": tune.choice([False, True]),\n", " \"max_steps\": tune.choice([500, 1000, 5000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoPatchTST, self).__init__(\n", " cls_model=PatchTST, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " 
callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config['input_size_multiplier']]) \n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "9d807209", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoPatchTST, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "13878573", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoPatchTST.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=16)\n", "model = AutoPatchTST(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoPatchTST(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "eff3267c", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoPatchTST, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoPatchTST.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 16})\n", " return config\n", "\n", "model = AutoPatchTST(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoPatchTST.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 16\n", "model = AutoPatchTST(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "a61c3be9", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoiTransformer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"n_series\": None,\n", " \"hidden_size\": tune.choice([64, 128, 256]),\n", " \"n_heads\": tune.choice([4, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " n_series,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend, n_series=n_series) 
\n", "\n", " # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n", " if backend == 'ray':\n", " config['n_series'] = n_series\n", " elif backend == 'optuna':\n", " mock_trial = MockTrial()\n", " if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n", " raise Exception(f\"config needs 'n_series': {n_series}\") \n", "\n", " super(AutoiTransformer, self).__init__(\n", " cls_model=iTransformer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series):\n", " config = cls.default_config.copy() \n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config[\"input_size_multiplier\"]])\n", "\n", " # Rolling windows with step_size=1 or step_size=h\n", " # See `BaseWindows` and `BaseRNN`'s create_windows\n", " config['step_size'] = tune.choice([1, h])\n", " del config[\"input_size_multiplier\"]\n", " if backend == 'optuna':\n", " # Always use n_series from parameters\n", " config['n_series'] = n_series\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "8f416fa0", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoiTransformer, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "7ffd40db", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoiTransformer.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=16)\n", "model = AutoiTransformer(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoiTransformer(h=12, n_series=1, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "7a2052de", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoiTransformer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoiTransformer.get_default_config(h=12, n_series=1, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 16})\n", " return config\n", "\n", "model = AutoiTransformer(h=12, n_series=1, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoiTransformer.get_default_config(h=12, n_series=1, backend='ray')\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 16\n", "model = AutoiTransformer(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "57d6cb1f", "metadata": {}, "source": [ "## D. 
CNN Based" ] }, { "cell_type": "code", "execution_count": null, "id": "2f775426", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoTimesNet(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"hidden_size\": tune.choice([32, 64, 128]),\n", " \"conv_hidden_size\": tune.choice([32, 64, 128]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice(['robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128]),\n", " \"windows_batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend) \n", "\n", " super(AutoTimesNet, self).__init__(\n", " cls_model=TimesNet, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " config = cls.default_config.copy()\n", " config['input_size'] = tune.choice([h*x \\\n", " for x in config['input_size_multiplier']])\n", " config['step_size'] = tune.choice([1, h]) \n", " del config['input_size_multiplier']\n", " if backend == 'optuna':\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "d2312754", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoTimesNet, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "1467a471", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoTimesNet.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=32)\n", "model = AutoTimesNet(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoTimesNet(h=12, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "7b60eb58", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoTimesNet, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoTimesNet.get_default_config(h=12, backend='optuna')\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 2, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 32})\n", " return config\n", "\n", "model = AutoTimesNet(h=12, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for 
situation: Ray with updated default config\n", "my_config = AutoTimesNet.get_default_config(h=12, backend='ray')\n", "my_config['max_steps'] = 2\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "my_config['hidden_size'] = 32\n", "model = AutoTimesNet(h=12, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e6fd22c7", "metadata": {}, "source": [ "## E. Multivariate" ] }, { "cell_type": "code", "execution_count": null, "id": "b6784e1a", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoStemGNN(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4],\n", " \"h\": None,\n", " \"n_series\": None,\n", " \"n_stacks\": tune.choice([2]),\n", " \"multi_layer\": tune.choice([3, 5, 7]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " n_series,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend, n_series=n_series) \n", "\n", " # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n", " if backend == 'ray':\n", " config['n_series'] = n_series\n", " elif backend == 'optuna':\n", " mock_trial = MockTrial()\n", " if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n", " raise Exception(f\"config needs 'n_series': {n_series}\")\n", "\n", " super(AutoStemGNN, self).__init__(\n", " cls_model=StemGNN, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series):\n", " config = cls.default_config.copy() \n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config[\"input_size_multiplier\"]])\n", "\n", " # Rolling windows with step_size=1 or step_size=h\n", " # See `BaseWindows` and `BaseRNN`'s create_windows\n", " config['step_size'] = tune.choice([1, h])\n", " del config[\"input_size_multiplier\"]\n", " if backend == 'optuna':\n", " # Always use n_series from parameters\n", " config['n_series'] = n_series\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config " ] }, { "cell_type": "code", "execution_count": null, "id": "1163c1de", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoStemGNN, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "ca99c2ea", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoStemGNN.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = 
AutoStemGNN(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoStemGNN(h=12, n_series=1, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "6cd03851", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "assert model.config(MockTrial())['n_series'] == 1\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoStemGNN, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoStemGNN.get_default_config(h=12, backend='optuna', n_series=1)\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoStemGNN(h=12, n_series=1, config=my_config_new, backend='optuna')\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoStemGNN.get_default_config(h=12, backend='ray', n_series=1)\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoStemGNN(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "58844aca", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoHINT(BaseAuto):\n", "\n", " def __init__(self,\n", " cls_model,\n", " h,\n", " loss,\n", " valid_loss,\n", " S,\n", " config,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " refit_with_val=False,\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None,\n", " ):\n", " \n", " super(AutoHINT, self).__init__(\n", " cls_model=cls_model, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks,\n", " )\n", " if backend == 'optuna':\n", " raise Exception(\"Optuna is not supported for AutoHINT.\")\n", "\n", " # Validate presence of reconciliation strategy\n", " # parameter in configuration space\n", " if not ('reconciliation' in config.keys()):\n", " raise Exception(\"config needs reconciliation, \\\n", " try tune.choice(['BottomUp', 'MinTraceOLS', 'MinTraceWLS'])\")\n", " self.S = S\n", "\n", " def _fit_model(self, cls_model, config,\n", " dataset, val_size, test_size, distributed_config=None):\n", " # Overwrite _fit_model for HINT two-stage instantiation\n", " reconciliation = config.pop('reconciliation')\n", " base_model = cls_model(**config)\n", " model = HINT(h=base_model.h, model=base_model, \n", " S=self.S, reconciliation=reconciliation)\n", " model.test_size = test_size\n", " model = model.fit(\n", " dataset,\n", " val_size=val_size, \n", " test_size=test_size,\n", " distributed_config=distributed_config,\n", " )\n", " return model\n", " \n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series=None):\n", " raise Exception(\"AutoHINT has no default configuration.\")\n" ] }, { "cell_type": "code", 
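"execution_count": null, "id": "3c8e5f2b", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Note: unlike the other Auto* classes, AutoHINT wraps an arbitrary base model\n", "# class (cls_model) and ships no default_config: get_default_config raises by\n", "# design, and the search space must include a 'reconciliation' key, which is\n", "# validated in __init__ above." ] }, { "cell_type": "code",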
"execution_count": null, "id": "622a0888", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoHINT, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "bf210740", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "def sort_df_hier(Y_df, S_df):\n", " # NeuralForecast core, sorts unique_id lexicographically\n", " # by default, this class matches S_df and Y_hat_df order. \n", " Y_df.unique_id = Y_df.unique_id.astype('category')\n", " Y_df.unique_id = Y_df.unique_id.cat.set_categories(S_df.index)\n", " Y_df = Y_df.sort_values(by=['unique_id', 'ds'])\n", " return Y_df\n", "\n", "# -----Create synthetic dataset-----\n", "np.random.seed(123)\n", "train_steps = 20\n", "num_levels = 7\n", "level = np.arange(0, 100, 0.1)\n", "qs = [[50-lv/2, 50+lv/2] for lv in level]\n", "quantiles = np.sort(np.concatenate(qs)/100)\n", "\n", "levels = ['Top', 'Mid1', 'Mid2', 'Bottom1', 'Bottom2', 'Bottom3', 'Bottom4']\n", "unique_ids = np.repeat(levels, train_steps)\n", "\n", "S = np.array([[1., 1., 1., 1.],\n", " [1., 1., 0., 0.],\n", " [0., 0., 1., 1.],\n", " [1., 0., 0., 0.],\n", " [0., 1., 0., 0.],\n", " [0., 0., 1., 0.],\n", " [0., 0., 0., 1.]])\n", "\n", "S_dict = {col: S[:, i] for i, col in enumerate(levels[3:])}\n", "S_df = pd.DataFrame(S_dict, index=levels)\n", "\n", "ds = pd.date_range(start='2018-03-31', periods=train_steps, freq='Q').tolist() * num_levels\n", "# Create Y_df\n", "y_lists = [S @ np.random.uniform(low=100, high=500, size=4) for i in range(train_steps)]\n", "y = [elem for tup in zip(*y_lists) for elem in tup]\n", "Y_df = pd.DataFrame({'unique_id': unique_ids, 'ds': ds, 'y': y})\n", "Y_df = sort_df_hier(Y_df, S_df)\n", "\n", "hint_dataset, *_ = TimeSeriesDataset.from_df(df=Y_df)" ] }, { "cell_type": "code", "execution_count": null, "id": "d41f07c9", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Perform a simple hyperparameter optimization with \n", "# NHITS and then reconcile with HINT\n", "from neuralforecast.losses.pytorch import GMM, sCRPS\n", "\n", "base_config = dict(max_steps=1, val_check_steps=1, input_size=8)\n", "base_model = AutoNHITS(h=4, loss=GMM(n_components=2, quantiles=quantiles), \n", " config=base_config, num_samples=1, cpus=1)\n", "model = HINT(h=4, S=S_df.values,\n", " model=base_model, reconciliation='MinTraceOLS')\n", "\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=hint_dataset)\n", "\n", "# Perform a conjunct hyperparameter optimization with \n", "# NHITS + HINT reconciliation configurations\n", "nhits_config = {\n", " \"learning_rate\": tune.choice([1e-3]), # Initial Learning rate\n", " \"max_steps\": tune.choice([1]), # Number of SGD steps\n", " \"val_check_steps\": tune.choice([1]), # Number of steps between validation\n", " \"input_size\": tune.choice([5 * 12]), # input_size = multiplier * horizon\n", " \"batch_size\": tune.choice([7]), # Number of series in windows\n", " \"windows_batch_size\": tune.choice([256]), # Number of windows in batch\n", " \"n_pool_kernel_size\": tune.choice([[2, 2, 2], [16, 8, 1]]), # MaxPool's Kernelsize\n", " \"n_freq_downsample\": tune.choice([[168, 24, 1], [24, 12, 1], [1, 1, 1]]), # Interpolation expressivity ratios\n", " \"activation\": tune.choice(['ReLU']), # Type of non-linear activation\n", " \"n_blocks\": tune.choice([[1, 1, 1]]), # Blocks per each 3 stacks\n", " \"mlp_units\": tune.choice([[[512, 512], [512, 512], [512, 512]]]), # 2 512-Layers per block for each stack\n", " \"interpolation_mode\": tune.choice(['linear']), # Type of multi-step 
interpolation\n", " \"random_seed\": tune.randint(1, 10),\n", " \"reconciliation\": tune.choice(['BottomUp', 'MinTraceOLS', 'MinTraceWLS'])\n", " }\n", "model = AutoHINT(h=4, S=S_df.values,\n", " cls_model=NHITS,\n", " config=nhits_config,\n", " loss=GMM(n_components=2, level=[80, 90]),\n", " valid_loss=sCRPS(level=[80, 90]),\n", " num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=hint_dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "58183a37", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoHINT) " ] }, { "cell_type": "code", "execution_count": null, "id": "cfe91bb8", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoTSMixer(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4],\n", " \"h\": None,\n", " \"n_series\": None,\n", " \"n_block\": tune.choice([1, 2, 4, 6, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-2),\n", " \"ff_dim\": tune.choice([32, 64, 128]),\n", " \"scaler_type\": tune.choice(['identity', 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"dropout\": tune.uniform(0.0, 0.99),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " n_series,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend, n_series=n_series) \n", "\n", " # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n", " if backend == 'ray':\n", " config['n_series'] = n_series\n", " elif backend == 'optuna':\n", " mock_trial = MockTrial()\n", " if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n", " raise Exception(f\"config needs 'n_series': {n_series}\")\n", "\n", " super(AutoTSMixer, self).__init__(\n", " cls_model=TSMixer, \n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series):\n", " config = cls.default_config.copy() \n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config[\"input_size_multiplier\"]])\n", "\n", " # Rolling windows with step_size=1 or step_size=h\n", " # See `BaseWindows` and `BaseRNN`'s create_windows\n", " config['step_size'] = tune.choice([1, h])\n", " del config[\"input_size_multiplier\"]\n", " if backend == 'optuna':\n", " # Always use n_series from parameters\n", " config['n_series'] = n_series\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "b844b1be", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoTSMixer, title_level=3)" ] }, { "cell_type": 
"code", "execution_count": null, "id": "c3cafb38", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoTSMixer.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoTSMixer(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoTSMixer(h=12, n_series=1, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "062abe64", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "assert model.config(MockTrial())['n_series'] == 1\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoTSMixer, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoTSMixer.get_default_config(h=12, backend='optuna', n_series=1)\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoTSMixer(h=12, n_series=1, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoTSMixer.get_default_config(h=12, backend='ray', n_series=1)\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoTSMixer(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "a83fec63", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoTSMixerx(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4],\n", " \"h\": None,\n", " \"n_series\": None,\n", " \"n_block\": tune.choice([1, 2, 4, 6, 8]),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-2),\n", " \"ff_dim\": tune.choice([32, 64, 128]),\n", " \"scaler_type\": tune.choice(['identity', 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000, 2000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"dropout\": tune.uniform(0.0, 0.99),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", "\n", " def __init__(self,\n", " h,\n", " n_series,\n", " loss=MAE(),\n", " valid_loss=None,\n", " config=None, \n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", " \n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend, n_series=n_series) \n", "\n", " # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n", " if backend == 'ray':\n", " config['n_series'] = n_series\n", " elif backend == 'optuna':\n", " mock_trial = MockTrial()\n", " if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n", " raise Exception(f\"config needs 'n_series': {n_series}\") \n", "\n", " super(AutoTSMixerx, self).__init__(\n", " cls_model=TSMixerx, \n", " h=h,\n", " 
loss=loss,\n", " valid_loss=valid_loss,\n", " config=config,\n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series):\n", " config = cls.default_config.copy() \n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config[\"input_size_multiplier\"]])\n", "\n", " # Rolling windows with step_size=1 or step_size=h\n", " # See `BaseWindows` and `BaseRNN`'s create_windows\n", " config['step_size'] = tune.choice([1, h])\n", " del config[\"input_size_multiplier\"]\n", " if backend == 'optuna':\n", " # Always use n_series from parameters\n", " config['n_series'] = n_series\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "f99bd4dd", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoTSMixerx, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "41f67b64", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoTSMixerx.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoTSMixerx(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoTSMixerx(h=12, n_series=1, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "05210f6f", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "assert model.config(MockTrial())['n_series'] == 1\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoTSMixerx, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoTSMixerx.get_default_config(h=12, backend='optuna', n_series=1)\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoTSMixerx(h=12, n_series=1, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoTSMixerx.get_default_config(h=12, backend='ray', n_series=1)\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoTSMixerx(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "18518b1a", "metadata": {}, "outputs": [], "source": [ "#| export\n", "class AutoMLPMultivariate(BaseAuto):\n", "\n", " default_config = {\n", " \"input_size_multiplier\": [1, 2, 3, 4, 5],\n", " \"h\": None,\n", " \"n_series\": None,\n", " \"hidden_size\": tune.choice( [256, 512, 1024] ),\n", " \"num_layers\": tune.randint(2, 6),\n", " \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n", " \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n", " \"max_steps\": tune.choice([500, 1000]),\n", " \"batch_size\": tune.choice([32, 64, 128, 256]),\n", " \"loss\": None,\n", " \"random_seed\": tune.randint(1, 20),\n", " }\n", 
"\n", " def __init__(self,\n", " h,\n", " n_series,\n", " loss=MAE(),\n", " valid_loss=None, \n", " config=None,\n", " search_alg=BasicVariantGenerator(random_state=1),\n", " num_samples=10,\n", " refit_with_val=False,\n", " cpus=cpu_count(),\n", " gpus=torch.cuda.device_count(),\n", " verbose=False,\n", " alias=None,\n", " backend='ray',\n", " callbacks=None):\n", "\n", " # Define search space, input/output sizes\n", " if config is None:\n", " config = self.get_default_config(h=h, backend=backend, n_series=n_series) \n", "\n", " # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n", " if backend == 'ray':\n", " config['n_series'] = n_series\n", " elif backend == 'optuna':\n", " mock_trial = MockTrial()\n", " if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n", " raise Exception(f\"config needs 'n_series': {n_series}\") \n", "\n", " super(AutoMLPMultivariate, self).__init__(\n", " cls_model=MLPMultivariate,\n", " h=h,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", " config=config, \n", " search_alg=search_alg,\n", " num_samples=num_samples, \n", " refit_with_val=refit_with_val,\n", " cpus=cpus,\n", " gpus=gpus,\n", " verbose=verbose,\n", " alias=alias,\n", " backend=backend,\n", " callbacks=callbacks, \n", " )\n", "\n", " @classmethod\n", " def get_default_config(cls, h, backend, n_series):\n", " config = cls.default_config.copy() \n", " config['input_size'] = tune.choice([h * x \\\n", " for x in config[\"input_size_multiplier\"]])\n", "\n", " # Rolling windows with step_size=1 or step_size=h\n", " # See `BaseWindows` and `BaseRNN`'s create_windows\n", " config['step_size'] = tune.choice([1, h])\n", " del config[\"input_size_multiplier\"]\n", " if backend == 'optuna':\n", " # Always use n_series from parameters\n", " config['n_series'] = n_series\n", " config = cls._ray_config_to_optuna(config) \n", "\n", " return config" ] }, { "cell_type": "code", "execution_count": null, "id": "d077b8e5", "metadata": {}, "outputs": [], "source": [ "show_doc(AutoMLPMultivariate, title_level=3)" ] }, { "cell_type": "code", "execution_count": null, "id": "abd40c5b", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "# Use your own config or AutoTSMixerx.default_config\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoMLPMultivariate(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Optuna\n", "model = AutoMLPMultivariate(h=12, n_series=1, config=None, backend='optuna')" ] }, { "cell_type": "code", "execution_count": null, "id": "64639b42", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Check Optuna\n", "assert model.config(MockTrial())['h'] == 12\n", "assert model.config(MockTrial())['n_series'] == 1\n", "\n", "# Unit test to test that Auto* model contains all required arguments from BaseAuto\n", "test_args(AutoMLPMultivariate, exclude_args=['cls_model']) \n", "\n", "# Unit test for situation: Optuna with updated default config\n", "my_config = AutoMLPMultivariate.get_default_config(h=12, backend='optuna', n_series=1)\n", "def my_config_new(trial):\n", " config = {**my_config(trial)}\n", " config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12})\n", " return config\n", "\n", "model = AutoMLPMultivariate(h=12, n_series=1, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n", 
"model.fit(dataset=dataset)\n", "\n", "# Unit test for situation: Ray with updated default config\n", "my_config = AutoMLPMultivariate.get_default_config(h=12, backend='ray', n_series=1)\n", "my_config['max_steps'] = 1\n", "my_config['val_check_steps'] = 1\n", "my_config['input_size'] = 12\n", "model = AutoMLPMultivariate(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n", "model.fit(dataset=dataset)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "69d588d9", "metadata": {}, "source": [ "# TESTS" ] }, { "cell_type": "code", "execution_count": null, "id": "db128f64-b311-479e-bf81-07c3bf9f1a5e", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "\n", "from neuralforecast.tsdataset import TimeSeriesDataset\n", "from neuralforecast.utils import AirPassengersDF as Y_df" ] }, { "cell_type": "code", "execution_count": null, "id": "1691fdcc-99e3-472b-ae26-03fb89847227", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "# Split train/test and declare time series dataset\n", "Y_train_df = Y_df[Y_df.ds<='1959-12-31'] # 132 train\n", "Y_test_df = Y_df[Y_df.ds>'1959-12-31'] # 12 test\n", "dataset, *_ = TimeSeriesDataset.from_df(Y_train_df)\n", "\n", "config = dict(max_steps=1, val_check_steps=1, input_size=12)\n", "model = AutoNHITS(h=12, config=config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)" ] }, { "cell_type": "code", "execution_count": null, "id": "535cf3a7", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "## TESTS\n", "nhits_config = {\n", " \"learning_rate\": tune.choice([1e-3]), # Initial Learning rate\n", " \"max_steps\": tune.choice([1]), # Number of SGD steps\n", " \"val_check_steps\": tune.choice([1]), # Number of steps between validation\n", " \"input_size\": tune.choice([5 * 12]), # input_size = multiplier * horizon\n", " \"batch_size\": tune.choice([7]), # Number of series in windows\n", " \"windows_batch_size\": tune.choice([256]), # Number of windows in batch\n", " \"n_pool_kernel_size\": tune.choice([[2, 2, 2], [16, 8, 1]]), # MaxPool's Kernelsize\n", " \"n_freq_downsample\": tune.choice([[168, 24, 1], [24, 12, 1], [1, 1, 1]]), # Interpolation expressivity ratios\n", " \"activation\": tune.choice(['ReLU']), # Type of non-linear activation\n", " \"n_blocks\": tune.choice([[1, 1, 1]]), # Blocks per each 3 stacks\n", " \"mlp_units\": tune.choice([[[512, 512], [512, 512], [512, 512]]]), # 2 512-Layers per block for each stack\n", " \"interpolation_mode\": tune.choice(['linear']), # Type of multi-step interpolation\n", " \"random_seed\": tune.randint(1, 10),\n", " }\n", "\n", "model = AutoNHITS(h=12, loss=MAE(), valid_loss=MSE(), config=nhits_config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "# Test equality\n", "test_eq(str(type(model.valid_loss)), \"\")" ] }, { "cell_type": "code", "execution_count": null, "id": "0bd1d011-aafb-4b7c-a0a0-0f190b529fa8", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "from neuralforecast.losses.pytorch import GMM, sCRPS" ] }, { "cell_type": "code", "execution_count": null, "id": "b7e471ca-a633-4868-be0b-3f2992b6cdc1", "metadata": {}, "outputs": [], "source": [ "#| hide\n", "## TODO: Add unit tests for interactions between loss/valid_loss types\n", "## TODO: Unit tests (2 types of networks x 2 types of loss x 2 types of valid 
loss)\n", "## Checking if base recurrent methods run point valid_loss correctly\n", "tcn_config = {\n", " \"learning_rate\": tune.choice([1e-3]), # Initial Learning rate\n", " \"max_steps\": tune.choice([1]), # Number of SGD steps\n", " \"val_check_steps\": tune.choice([1]), # Number of steps between validation\n", " \"input_size\": tune.choice([5 * 12]), # input_size = multiplier * horizon\n", " \"batch_size\": tune.choice([7]), # Number of series in windows\n", " \"random_seed\": tune.randint(1, 10),\n", " }\n", "\n", "model = AutoTCN(h=12, \n", " loss=MAE(), \n", " valid_loss=MSE(), \n", " config=tcn_config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)\n", "\n", "## Checking if base recurrent methods run quantile valid_loss correctly\n", "model = AutoTCN(h=12, \n", " loss=GMM(n_components=2, level=[80, 90]),\n", " valid_loss=sCRPS(level=[80, 90]),\n", " config=tcn_config, num_samples=1, cpus=1)\n", "\n", "# Fit and predict\n", "model.fit(dataset=dataset)\n", "y_hat = model.predict(dataset=dataset)" ] } ], "metadata": { "kernelspec": { "display_name": "python3", "language": "python", "name": "python3" } }, "nbformat": 4, "nbformat_minor": 5 }