"official/vision/image_classification/resnet/common.py" did not exist on "bd211e3eca52e4d9ae16540394af27a52a12b1ee"
Commit 9c0053b7 authored by chenzk's avatar chenzk
Browse files

v1.0

parents
Pipeline #953 canceled with stages
# Variate-generalization training on Electricity (ECL): train on 20% of the
# variates (enc_in 64 = 321 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=3

# CI-Transformer: vanilla Transformer made variate-agnostic via channel independence.
model_name=Transformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/electricity/ \
  --data_path electricity.csv \
  --model_id ECL_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 64 \
  --dec_in 64 \
  --c_out 64 \
  --des 'Exp' \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --d_model 32 \
  --d_ff 64 \
  --itr 1

# Inverted Transformer: variate tokens generalize directly to unseen variates.
model_name=iTransformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/electricity/ \
  --data_path electricity.csv \
  --model_id ECL_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 64 \
  --dec_in 64 \
  --c_out 64 \
  --des 'Exp' \
  --exp_name partial_train \
  --itr 1
# iTransformer for Variate Generalization
This folder contains the implementation of the iTransformer to generalize on unseen variates. If you are new to this repo, we recommend you to read this [README](../multivariate_forecasting/README.md) first.
By inverting vanilla Transformers, the model is empowered with the generalization capability on unseen variates. Firstly, benefiting from the flexibility of the number of input tokens, the number of variate channels is no longer restricted and is therefore free to vary between training and inference. Secondly, feed-forward networks are identically applied on independent variate tokens to learn transferable representations of time series.
## Scripts
```
# Train models with only 20% of variates from Traffic and test the model on all variates without finetuning
bash ./scripts/variate_generalization/Traffic/iTransformer.sh
```
> During Training
<p align="center">
<img src="../../figures/pt.png" alt="" align=center />
</p>
> During Inference
<p align="center">
<img src="../../figures/pi.png" alt="" align=center />
</p>
In each folder named after the dataset, we provide two strategies to enable Transformers to generalize to unseen variates.
* **CI-Transformers**: Channel Independence regards each variate of time series as independent channels, and uses a shared backbone to forecast all variates. Therefore, the model can predict variates one by one, but the training and inference procedure can be time-consuming.
* **iTransformers**: benefiting from the flexibility of attention that the number of input tokens can be dynamically changeable, the amount of variates as tokens is no longer restricted, and can even allow the model to be trained on arbitrary variables.
## Results
<p align="center">
<img src="../../figures/generability.png" alt="" align=center />
</p>
iTransformers can be naturally trained with only 20% of the variates and accomplish forecasting on all variates, owing to their ability to learn transferable representations.
\ No newline at end of file
# Variate-generalization training on Solar: train on 20% of the variates
# (enc_in 27 = 137 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=0

# CI-Flowformer: channel-independent Flowformer baseline.
model_name=Flowformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --d_model 32 \
  --d_ff 64 \
  --learning_rate 0.0005 \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --itr 1

# Inverted Flowformer: variate tokens generalize directly to unseen variates.
model_name=iFlowformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --learning_rate 0.0005 \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Solar: train on 20% of the variates
# (enc_in 27 = 137 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=1

# CI-Informer: channel-independent Informer baseline.
model_name=Informer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --d_model 32 \
  --d_ff 64 \
  --learning_rate 0.0005 \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --itr 1

# Inverted Informer: variate tokens generalize directly to unseen variates.
model_name=iInformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --learning_rate 0.0005 \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Solar: train on 20% of the variates
# (enc_in 27 = 137 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=2

# CI-Reformer: channel-independent Reformer baseline.
model_name=Reformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --d_model 32 \
  --d_ff 64 \
  --learning_rate 0.0005 \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --itr 1

# Inverted Reformer: variate tokens generalize directly to unseen variates.
model_name=iReformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --learning_rate 0.0005 \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Solar: train on 20% of the variates
# (27 = 137 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=3

# CI-Transformer: channel-independent vanilla Transformer baseline.
model_name=Transformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --d_model 32 \
  --d_ff 64 \
  --learning_rate 0.0005 \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --itr 1

# Inverted Transformer: variate tokens generalize directly to unseen variates.
model_name=iTransformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/Solar/ \
  --data_path solar_AL.txt \
  --model_id solar_96_96 \
  --model $model_name \
  --data Solar \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 27 \
  --dec_in 27 \
  --c_out 27 \
  --des 'Exp' \
  --learning_rate 0.0005 \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Traffic: train on 20% of the variates
# (enc_in 172 = 862 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=0

# CI-Flowformer: channel-independent Flowformer baseline.
model_name=Flowformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 4 \
  --d_model 32 \
  --d_ff 64 \
  --itr 1

# Inverted Flowformer: variate tokens generalize directly to unseen variates.
model_name=iFlowformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Traffic: train on 20% of the variates
# (enc_in 172 = 862 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=1

# CI-Informer: channel-independent Informer baseline.
model_name=Informer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 4 \
  --d_model 32 \
  --d_ff 64 \
  --itr 1

# Inverted Informer: variate tokens generalize directly to unseen variates.
model_name=iInformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Traffic: train on 20% of the variates
# (enc_in 172 = 862 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=2

# CI-Reformer: channel-independent Reformer baseline.
model_name=Reformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 4 \
  --d_model 32 \
  --d_ff 64 \
  --itr 1

# Inverted Reformer: variate tokens generalize directly to unseen variates.
model_name=iReformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --exp_name partial_train \
  --itr 1
# Variate-generalization training on Traffic: train on 20% of the variates
# (enc_in 172 = 862 // 5), then test on all variates without finetuning.
export CUDA_VISIBLE_DEVICES=3

# CI-Transformer: channel-independent vanilla Transformer baseline.
model_name=Transformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --channel_independence true \
  --exp_name partial_train \
  --batch_size 8 \
  --d_model 32 \
  --d_ff 64 \
  --itr 1

# Inverted Transformer: variate tokens generalize directly to unseen variates.
model_name=iTransformer

python -u run.py \
  --is_training 1 \
  --root_path ./dataset/traffic/ \
  --data_path traffic.csv \
  --model_id traffic_96_96 \
  --model $model_name \
  --data custom \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --e_layers 2 \
  --d_layers 1 \
  --factor 3 \
  --enc_in 172 \
  --dec_in 172 \
  --c_out 172 \
  --des 'Exp' \
  --exp_name partial_train \
  --itr 1
import torch
class TriangularCausalMask():
    """Boolean causal attention mask of shape [B, 1, L, L].

    Entries strictly above the main diagonal are True, marking future
    positions that must be masked out of the attention scores.
    """

    def __init__(self, B, L, device="cpu"):
        with torch.no_grad():
            ones = torch.ones((B, 1, L, L), dtype=torch.bool)
            self._mask = torch.triu(ones, diagonal=1).to(device)

    @property
    def mask(self):
        # Read-only view of the precomputed boolean mask.
        return self._mask
class ProbMask():
    """Causal mask for ProbSparse attention (Informer-style).

    Only the rows of the causal mask corresponding to the selected top
    query positions in `index` are gathered, producing a mask shaped
    like `scores`.
    """

    def __init__(self, B, H, L, index, scores, device="cpu"):
        # Full causal mask over [L, L_k]: True strictly above the diagonal.
        _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)
        # Broadcast (without copy) to [B, H, L, L_k].
        _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])
        # Advanced indexing gathers, per (batch, head), only the mask rows
        # of the selected query positions in `index`.
        indicator = _mask_ex[torch.arange(B)[:, None, None],
                             torch.arange(H)[None, :, None],
                             index, :].to(device)
        self._mask = indicator.view(scores.shape).to(device)

    @property
    def mask(self):
        # Boolean tensor shaped like `scores`; True marks masked entries.
        return self._mask
import numpy as np
def RSE(pred, true):
    """Root Relative Squared Error: ||true - pred|| / ||true - mean(true)||."""
    residual_norm = np.sqrt(np.sum((true - pred) ** 2))
    baseline_norm = np.sqrt(np.sum((true - true.mean()) ** 2))
    return residual_norm / baseline_norm
def CORR(pred, true):
    """Mean correlation-style score between pred and true along axis 0.

    NOTE(review): the denominator multiplies the squared deviations
    element-wise before summing (not the usual product of summed
    variances), and is zero for constant series (NaN/inf result) —
    confirm intended semantics before reusing outside this repo.
    """
    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)
    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))
    return (u / d).mean(-1)
def MAE(pred, true):
    """Mean Absolute Error."""
    return np.abs(pred - true).mean()
def MSE(pred, true):
    """Mean Squared Error."""
    return ((pred - true) ** 2).mean()
def RMSE(pred, true):
    """Root Mean Squared Error: square root of MSE."""
    mse_value = MSE(pred, true)
    return np.sqrt(mse_value)
def MAPE(pred, true):
    """Mean Absolute Percentage Error (undefined where true == 0)."""
    relative_error = (pred - true) / true
    return np.abs(relative_error).mean()
def MSPE(pred, true):
    """Mean Squared Percentage Error (undefined where true == 0)."""
    relative_error = (pred - true) / true
    return np.mean(relative_error ** 2)
def metric(pred, true):
    """Bundle the standard regression metrics.

    Returns the tuple (mae, mse, rmse, mape, mspe).
    """
    return (MAE(pred, true),
            MSE(pred, true),
            RMSE(pred, true),
            MAPE(pred, true),
            MSPE(pred, true))
# From: gluonts/src/gluonts/time_feature/_base.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
class TimeFeature:
    """Base class for vectorized calendar features over a DatetimeIndex.

    Subclasses override ``__call__`` to map an index to values scaled
    into [-0.5, 0.5].
    """

    def __init__(self):
        pass

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Abstract hook: subclasses override; base implementation returns None.
        pass

    def __repr__(self):
        return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
    """Second of minute encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
    """Minute of hour encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Scale 0..59 to [0, 1], then center on zero.
        fraction = index.minute / 59.0
        return fraction - 0.5
class HourOfDay(TimeFeature):
    """Hour of day encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Scale 0..23 to [0, 1], then center on zero.
        fraction = index.hour / 23.0
        return fraction - 0.5
class DayOfWeek(TimeFeature):
    """Day of week encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
    """Day of month encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Days are 1-based; shift to 0-based before scaling.
        zero_based = index.day - 1
        return zero_based / 30.0 - 0.5
class DayOfYear(TimeFeature):
    """Day of year encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Days are 1-based; shift to 0-based before scaling.
        zero_based = index.dayofyear - 1
        return zero_based / 365.0 - 0.5
class MonthOfYear(TimeFeature):
    """Month of year encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Months are 1-based; shift to 0-based before scaling.
        zero_based = index.month - 1
        return zero_based / 11.0 - 0.5
class WeekOfYear(TimeFeature):
    """Week of year encoded as value between [-0.5, 0.5]"""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # ISO weeks are 1-based; shift to 0-based before scaling.
        week = index.isocalendar().week
        return (week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.

    Raises
    ------
    RuntimeError
        If the frequency string is not supported.
    """
    # Map each pandas offset class to the calendar features that vary at
    # (or below) that granularity; coarser frequencies need fewer features.
    features_by_offsets = {
        offsets.YearEnd: [],
        offsets.QuarterEnd: [MonthOfYear],
        offsets.MonthEnd: [MonthOfYear],
        offsets.Week: [DayOfMonth, WeekOfYear],
        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Minute: [
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
        offsets.Second: [
            SecondOfMinute,
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
    }
    offset = to_offset(freq_str)
    # Instantiate the feature set of the first matching offset class.
    for offset_type, feature_classes in features_by_offsets.items():
        if isinstance(offset, offset_type):
            return [cls() for cls in feature_classes]
    supported_freq_msg = f"""
    Unsupported frequency {freq_str}
    The following frequencies are supported:
        Y   - yearly
            alias: A
        M   - monthly
        W   - weekly
        D   - daily
        B   - business days
        H   - hourly
        T   - minutely
            alias: min
        S   - secondly
    """
    raise RuntimeError(supported_freq_msg)
def time_features(dates, freq='h'):
    """Stack every time feature appropriate for `freq` into one 2-D array."""
    feature_fns = time_features_from_frequency_str(freq)
    return np.vstack([fn(dates) for fn in feature_fns])
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import pandas as pd
plt.switch_backend('agg')
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the optimizer's learning rate according to ``args.lradj``.

    'type1': halve the base learning rate every epoch.
    'type2': fixed milestone schedule (hard-coded epochs/values below).
    Any other value is a no-op.

    Mutates ``optimizer.param_groups`` in place and prints the new rate
    when a change is applied.
    """
    if args.lradj == 'type1':
        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}
    elif args.lradj == 'type2':
        lr_adjust = {
            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,
            10: 5e-7, 15: 1e-7, 20: 5e-8
        }
    else:
        # Bug fix: previously an unknown lradj left lr_adjust unbound,
        # raising NameError below. Unknown schedules now do nothing.
        lr_adjust = {}
    if epoch in lr_adjust:
        lr = lr_adjust[epoch]
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        print('Updating learning rate to {}'.format(lr))
class EarlyStopping:
    """Stop training when validation loss stops improving.

    Saves a checkpoint ('checkpoint.pth' under ``path``) each time the
    validation loss improves; sets ``early_stop`` once ``patience``
    consecutive non-improving epochs have passed.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        self.patience = patience    # epochs to wait after last improvement
        self.verbose = verbose      # print a message on each checkpoint save
        self.counter = 0            # epochs since last improvement
        self.best_score = None      # best (-val_loss) seen so far
        self.early_stop = False     # set True once patience is exhausted
        # Bug fix: np.Inf was removed in NumPy 2.0; float('inf') is equivalent.
        self.val_loss_min = float('inf')
        self.delta = delta          # minimum change counting as improvement

    def __call__(self, val_loss, model, path):
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, path):
        """Persist model weights and record the new best validation loss."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')
        self.val_loss_min = val_loss
class dotdict(dict):
    """dot.notation access to dictionary attributes"""
    # Missing keys return None (dict.get semantics) rather than raising
    # AttributeError; attribute writes/deletes map to item writes/deletes.
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
class StandardScaler():
    """Standardize data with a fixed mean/std pair and invert the transform."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Return (data - mean) / std."""
        centered = data - self.mean
        return centered / self.std

    def inverse_transform(self, data):
        """Undo transform: return data * std + mean."""
        rescaled = data * self.std
        return rescaled + self.mean
def visual(true, preds=None, name='./pic/test.pdf'):
    """
    Results visualization

    Plots the ground-truth series (and predictions, when given) and
    saves the figure to `name` (parent directory must already exist).
    """
    plt.figure()
    plt.plot(true, label='GroundTruth', linewidth=2)
    if preds is not None:
        plt.plot(preds, label='Prediction', linewidth=2)
    plt.legend()
    plt.savefig(name, bbox_inches='tight')
def adjustment(gt, pred):
    """Point-adjust anomaly predictions against ground-truth segments.

    If any predicted anomaly point falls inside a ground-truth anomaly
    segment (a contiguous run of gt == 1), the entire segment is marked
    as detected in `pred`. `pred` is modified in place; returns (gt, pred).
    """
    anomaly_state = False
    for i in range(len(gt)):
        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
            anomaly_state = True
            # Walk backwards to the start of this ground-truth segment,
            # flipping missed predictions to 1.
            for j in range(i, 0, -1):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
            # Walk forwards to the end of this ground-truth segment.
            for j in range(i, len(gt)):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
        elif gt[i] == 0:
            anomaly_state = False
        # While inside a detected segment, keep pred forced to 1.
        if anomaly_state:
            pred[i] = 1
    return gt, pred
def cal_accuracy(y_pred, y_true):
    """Fraction of positions where the prediction equals the ground truth."""
    matches = y_pred == y_true
    return np.mean(matches)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment