{
"files": [
"README.md"
],
"imageSize": 100,
"commit": false,
"contributors": [
{
"login": "FedericoGarza",
"name": "fede",
"avatar_url": "https://avatars.githubusercontent.com/u/10517170?v=4",
"profile": "https://github.com/FedericoGarza",
"contributions": [
"code",
"maintenance"
]
},
{
"login": "cchallu",
"name": "Cristian Challu",
"avatar_url": "https://avatars.githubusercontent.com/u/31133398?v=4",
"profile": "https://github.com/cchallu",
"contributions": [
"code",
"maintenance"
]
},
{
"login": "jmoralez",
"name": "José Morales",
"avatar_url": "https://avatars.githubusercontent.com/u/8473587?v=4",
"profile": "https://github.com/jmoralez",
"contributions": [
"code",
"maintenance"
]
},
{
"login": "mergenthaler",
"name": "mergenthaler",
"avatar_url": "https://avatars.githubusercontent.com/u/4086186?v=4",
"profile": "https://github.com/mergenthaler",
"contributions": [
"doc",
"code"
]
},
{
"login": "kdgutier",
"name": "Kin",
"avatar_url": "https://avatars.githubusercontent.com/u/19935241?v=4",
"profile": "https://github.com/kdgutier",
"contributions": [
"code",
"bug",
"data"
]
},
{
"login": "gdevos010",
"name": "Greg DeVos",
"avatar_url": "https://avatars.githubusercontent.com/u/15316026?v=4",
"profile": "https://github.com/gdevos010",
"contributions": [
"ideas"
]
},
{
"login": "alejandroxag",
"name": "Alejandro",
"avatar_url": "https://avatars.githubusercontent.com/u/64334543?v=4",
"profile": "https://github.com/alejandroxag",
"contributions": [
"code"
]
},
{
"login": "stefanialvs",
"name": "stefanialvs",
"avatar_url": "https://avatars.githubusercontent.com/u/48966177?v=4",
"profile": "http://lavattiata.com",
"contributions": [
"design"
]
},
{
"login": "eltociear",
"name": "Ikko Ashimine",
"avatar_url": "https://avatars.githubusercontent.com/u/22633385?v=4",
"profile": "https://bandism.net/",
"contributions": [
"bug"
]
},
{
"login": "vglaucus",
"name": "vglaucus",
"avatar_url": "https://avatars.githubusercontent.com/u/75549033?v=4",
"profile": "https://github.com/vglaucus",
"contributions": [
"bug"
]
},
{
"login": "pitmonticone",
"name": "Pietro Monticone",
"avatar_url": "https://avatars.githubusercontent.com/u/38562595?v=4",
"profile": "https://github.com/pitmonticone",
"contributions": [
"bug"
]
}
],
"contributorsPerLine": 7,
"projectName": "neuralforecast",
"projectOwner": "Nixtla",
"repoType": "github",
"repoHost": "https://github.com",
"skipCi": true
}
version: 2.1
jobs:
nbdev-tests:
resource_class: xlarge
docker:
- image: mambaorg/micromamba:1.5-focal
steps:
- checkout
- run:
name: Install dependencies
command: micromamba install -n base -c conda-forge -y python=3.10 git -f environment-cpu.yml
- run:
name: Run nbdev tests
command: |
eval "$(micromamba shell hook --shell bash)"
micromamba activate base
pip install ".[dev]"
nbdev_test --do_print --timing --n_workers 1
test-model-performance:
resource_class: xlarge
docker:
- image: mambaorg/micromamba:1.5-focal
steps:
- checkout
- run:
name: Install dependencies
command: micromamba install -n base -c conda-forge -y python=3.10 -f environment-cpu.yml
- run:
name: Run model performance tests
command: |
eval "$(micromamba shell hook --shell bash)"
micromamba activate base
pip install -e ".[dev]"
export LD_LIBRARY_PATH=/opt/conda/lib:$LD_LIBRARY_PATH
cd ./action_files/test_models/
pip install -r requirements.txt
python -m src.models
python -m src.evaluation
cd ../../
- store_artifacts:
path: ./action_files/test_models/data/evaluation.csv
destination: evaluation.csv
test-model-performance2:
resource_class: xlarge
docker:
- image: mambaorg/micromamba:1.5-focal
steps:
- checkout
- run:
name: Install dependencies
command: micromamba install -n base -c conda-forge -y python=3.10 -f environment-cpu.yml
- run:
name: Run model performance tests
command: |
eval "$(micromamba shell hook --shell bash)"
micromamba activate base
pip install -e ".[dev]"
export LD_LIBRARY_PATH=/opt/conda/lib:$LD_LIBRARY_PATH
cd ./action_files/test_models/
pip install -r requirements.txt
python -m src.models2
python -m src.evaluation2
cd ../../
- store_artifacts:
path: ./action_files/test_models/data/evaluation.csv
destination: evaluation.csv
workflows:
sample:
jobs:
- nbdev-tests
- test-model-performance
- test-model-performance2
.ipynb_checkpoints
__pycache__
*.egg-info
Gemfile*
Gemfile.lock
docs/_site
build
dist
.vscode
*.gif
*.csv
*/data/*
*.parquet
tmp
_docs/
sidebar.yml
.DS_Store
.gitattributes
.gitconfig
nbs/.last_checked
_proc
lightning_logs/
/**/data/
debug_run
longhorizon/
_old/
fail_fast: true
repos:
- repo: local
hooks:
- id: imports_with_code
name: Cells with imports and code
entry: python action_files/imports_with_code.py
language: system
- repo: https://github.com/fastai/nbdev
rev: 2.2.10
hooks:
- id: nbdev_clean
- id: nbdev_export
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.8.0
hooks:
- id: mypy
args: [--ignore-missing-imports]
exclude: 'setup.py'
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ops@nixtla.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
# How to contribute
## Did you find a bug?
* Ensure the bug was not already reported by searching on GitHub under Issues.
* If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and clear description, as much relevant information as possible, and a code sample or an executable test case demonstrating the expected behavior that is not occurring.
* Be sure to add the complete error messages.
## Do you have a feature request?
* Ensure that it hasn't already been implemented in the `main` branch of the repository and that there isn't an open issue requesting it yet.
* Open a new issue and make sure to describe it clearly, mention how it improves the project and why it's useful.
## Do you want to fix a bug or implement a feature?
Bug fixes and features are added through pull requests (PRs).
## PR submission guidelines
* Keep each PR focused. Even if it's more convenient for you, do not combine several unrelated fixes in one PR. Create as many branches as needed to keep each PR focused.
* Ensure that your PR includes a test that fails without your patch, and passes with it.
* Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
* Do not mix style changes/fixes with "functional" changes. It's very difficult to review such PRs and they will most likely be rejected.
* Do not add/remove vertical whitespace. Preserve the original style of the file you edit as much as you can.
* Do not turn an already submitted PR into your development playground. If, after you submit a PR, you discover that more work is needed, close the PR, do the required work, and then submit a new PR. Otherwise each of your commits requires attention from the maintainers of the project.
* If, however, you submitted a PR and received a request for changes, you should proceed with commits inside that PR, so that the maintainer can see the incremental fixes and won't need to review the whole PR again. In the exceptional case where you realize it will take many commits to complete the requested changes, it's probably best to close the PR, do the work, and then submit it again. Use common sense when choosing one approach over the other.
### Local setup for working on a PR
#### Clone the repository
* HTTPS: `git clone https://github.com/Nixtla/neuralforecast.git`
* SSH: `git clone git@github.com:Nixtla/neuralforecast.git`
* GitHub CLI: `gh repo clone Nixtla/neuralforecast`
#### Set up a conda environment
The repo comes with an `environment.yml` file which contains the libraries needed to run all the tests. In order to set up the environment you must have `conda` installed; we recommend [miniconda](https://docs.conda.io/en/latest/miniconda.html).
Once you have `conda`, go to the top-level directory of the repository and run the following lines:
```
conda create -n neuralforecast python=3.10
conda activate neuralforecast
```
Then, run one of the following commands:
```
conda env update -f environment-cpu.yml # choose this if you want to install the CPU-only version of neuralforecast
conda env update -f environment-cuda.yml # choose this if you want to install the CUDA-enabled version of neuralforecast
```
#### Install the library
Once you have your environment set up, activate it using `conda activate neuralforecast` and then install the library in editable mode using `pip install -e ".[dev]"`.
#### Install git hooks
Before making any changes to the code, please install the git hooks that run automatic scripts during each commit and merge to strip the notebooks of superfluous metadata (and avoid merge conflicts).
```
nbdev_install_hooks
pre-commit install
```
### Preview Changes
You can preview changes in your local browser before pushing by running `nbdev_preview`.
### Building the library
The library is built using the notebooks contained in the `nbs` folder. If you want to make any changes to the library you have to find the relevant notebook, make your changes and then call:
```
nbdev_export
```
### Running tests
If you're working on the local interface you can just use `nbdev_test --n_workers 1 --do_print --timing`.
### Cleaning notebooks
Since the notebooks' output cells can vary from run to run (even if they produce the same outputs), the notebooks are cleaned before committing them. Please make sure to run `nbdev_clean --clear_all` before committing your changes. If you clean the library's notebooks with this command, please revert the changes it makes to the example notebooks with `git checkout nbs/examples`, unless you intend to change the examples.
## Do you want to contribute to the documentation?
* Docs are automatically created from the notebooks in the `nbs` folder.
* In order to modify the documentation:
1. Find the relevant notebook.
2. Make your changes.
3. Run all cells.
4. If you are modifying library notebooks (not in `nbs/examples`), clean all outputs using `Edit > Clear All Outputs`.
5. Run `nbdev_preview`.
6. Clean the notebook metadata using `nbdev_clean`.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2024 Nixtla
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
## Include List
include README.md
include LICENSE
include settings.ini
recursive-include neuralforecast *
## Exclude List
exclude CONTRIBUTING.md
exclude Makefile
exclude environment.yml
exclude .gitconfig
exclude .gitignore
exclude .gitmodules
recursive-exclude .github *
recursive-exclude docs *
recursive-exclude examples *
recursive-exclude experiments *
recursive-exclude nbs *
# iTransformer
iTransformer attends to a single feature across multiple time steps, which lets it exploit long-range temporal structure efficiently. This guide covers the iTransformer algorithm in the time-series forecasting library neuralforecast; the other algorithms in the library are used in the same way.
## Paper
`iTransformer: Inverted Transformers Are Effective for Time Series Forecasting`
- https://arxiv.org/pdf/2310.06625
## Model structure
The model is built from standard Transformer blocks; the backbone can be implemented without modifying the code of the standard Transformer architecture.
<div align=center>
<img src="./doc/transformer.png"/>
</div>
## How it works
iTransformer looks at one feature across many time steps by simply transposing the shape of the input. Instead of tokenizing sub-sequences of the input, the model tokenizes each entire input series. In this way, the attention layers focus on learning multivariate correlations, while the feed-forward network encodes the whole input series.
<div align=center>
<img src="./doc/iTransformer.png"/>
</div>
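The toy PyTorch snippet below illustrates the inversion described above. It is a minimal sketch of the idea only, not the implementation used in this repository; all layer sizes are arbitrary assumptions.
```python
# Minimal sketch of inverted tokenization: each variate's whole lookback window becomes one token,
# attention mixes information across variates, and the feed-forward network encodes each series.
import torch
import torch.nn as nn

B, L, N, d_model = 8, 96, 7, 64     # batch, lookback length, number of variates, embedding size
x = torch.randn(B, L, N)            # usual layout: one token per time step

tokens = x.transpose(1, 2)          # "inversion": (B, N, L), one token per variate

embed = nn.Linear(L, d_model)       # embed the full lookback series of each variate
attn = nn.MultiheadAttention(d_model, num_heads=4, batch_first=True)
ffn = nn.Sequential(nn.Linear(d_model, 2 * d_model), nn.GELU(), nn.Linear(2 * d_model, d_model))
head = nn.Linear(d_model, 96)       # project each variate token to an h=96 forecast

h = embed(tokens)                                 # (B, N, d_model)
h = h + attn(h, h, h, need_weights=False)[0]      # attention learns multivariate correlations
h = h + ffn(h)                                    # FFN encodes each variate's series representation
forecast = head(h).transpose(1, 2)                # back to (B, horizon, N)
print(forecast.shape)                             # torch.Size([8, 96, 7])
```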
## Environment setup
```
mv neuralforecast-itransformer_pytorch neuralforecast  # drop the framework-name suffix
```
### Docker (option 1)
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.1.0-centos7.6-dtk23.10-py38
# replace <your IMAGE ID> with the ID of the Docker image pulled above; for this image it is ffa1f63239fc
docker run -it --shm-size=32G -v $PWD/neuralforecast:/home/neuralforecast -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name neuralforecast <your IMAGE ID> bash
cd /home/neuralforecast
pip install -r requirements.txt # requirements.txt
```
### Dockerfile (option 2)
```
cd neuralforecast/docker
docker build --no-cache -t neuralforecast:latest .
docker run --shm-size=32G --name neuralforecast -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video -v $PWD/../../neuralforecast:/home/neuralforecast -it neuralforecast bash
# If installing the environment through the Dockerfile takes too long, comment out its pip install step and install the Python libraries after starting the container: pip install -r requirements.txt
```
### Anaconda (option 3)
1. The DCU-specific deep learning libraries required by this project can be downloaded and installed from the 光合 (Hygon) developer community:
- https://developer.hpccube.com/tool/
```
DTK driver: dtk23.10
python: python3.8
torch: 2.1.0
torchvision: 0.16.0
```
`Tip: the DTK driver, python, torch, and other DCU-related tool versions above must correspond to each other exactly.`
2. Install the remaining, non-DCU-specific libraries from requirements.txt:
```
pip install -r requirements.txt # requirements.txt
```
## Dataset
This guide uses the `ETTm2` dataset from LongHorizon:
- https://nhits-experiments.s3.amazonaws.com/datasets.zip
The data directory layout is as follows:
```
ETT-small/longhorizon/datasets/ETTm2
├── df_x.csv
├── df_y.csv
├── M
│   ├── df_x.csv
│   └── df_y.csv
└── S
    ├── df_x.csv
    └── df_y.csv
```
## Training
### Single node, single card
```
export HIP_VISIBLE_DEVICES=0
cd neuralforecast
python trainval.py  # the iTransformer in this library currently supports training and inference for single-target forecasting only; multi-target forecasting awaits the upstream open-source release
```
For more details, see the original project's [`README_origin`](./README_origin.md).
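For orientation, the sketch below shows what such a training and inference run looks like through the NeuralForecast API. It approximates, but is not, the repository's `trainval.py`/`infer.py`, and it assumes an installed neuralforecast version that ships the `iTransformer` model; constructor arguments such as `n_series` and the chosen hyperparameters may differ between versions.
```python
# Hedged sketch of a single-series iTransformer run; paths and hyperparameters are assumptions.
import pandas as pd
from neuralforecast import NeuralForecast
from neuralforecast.models import iTransformer

df = pd.read_csv('./ETT-small/longhorizon/datasets/ETTm2/df_y.csv')  # columns: ds, unique_id, y
df['ds'] = pd.to_datetime(df['ds'])

model = iTransformer(
    h=96,             # forecast one day of 15-minute steps
    input_size=384,   # four days of history (assumed lookback)
    n_series=1,       # single target, matching the single-metric limitation noted above
    max_steps=1000,
)
nf = NeuralForecast(models=[model], freq='15min')
nf.fit(df=df)
forecasts = nf.predict()  # DataFrame with unique_id, ds and an iTransformer column
print(forecasts.head())
```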
## Inference
```
export HIP_VISIBLE_DEVICES=0
python infer.py
# predicts one day ahead by default, so pred_len is 96
```
## Results
### Input
```
ds,unique_id,y
2016-07-01 00:00:00,OT,1.0180321560148238
2016-07-01 00:15:00,OT,0.9801244795019736
2016-07-01 00:30:00,OT,0.9042228236873991
2016-07-01 00:45:00,OT,0.8852256693365586
...
2018-02-20 23:00:00,OT,-1.5813253301696957
2018-02-20 23:15:00,OT,-1.5813253301696957
2018-02-20 23:30:00,OT,-1.5623282581688442
2018-02-20 23:45:00,OT,-1.5623282581688442
```
### Output
```
unique_id ds iTransformer
0 OT 2018-02-21 00:00:00 -1.567348
1 OT 2018-02-21 00:15:00 -1.577499
2 OT 2018-02-21 00:30:00 -1.579781
3 OT 2018-02-21 00:45:00 -1.587281
4 OT 2018-02-21 01:00:00 -1.578477
...
92 OT 2018-02-21 23:00:00 -1.569653
93 OT 2018-02-21 23:15:00 -1.546048
94 OT 2018-02-21 23:30:00 -1.561576
95 OT 2018-02-21 23:45:00 -1.542416
```
### Accuracy
Test data: a portion of [`ETTm2`](./ETT-small/longhorizon/datasets/ETTm2/df_y.csv) held out as a validation set; max_steps=1000; inference framework: pytorch.
| device | valid_loss |
|:---------:|:----------:|
| DCU Z100L | 0.226 |
| GPU V100S | 0.224 |
## Application scenarios
### Algorithm category
`Time series forecasting`
### Key application industries
`Finance, operations & maintenance, e-commerce, manufacturing, energy, healthcare`
## Source repository and issue reporting
- http://developer.hpccube.com/codes/modelzoo/neuralforecast-itransformer_pytorch.git
## References
- https://github.com/thuml/iTransformer.git
# Nixtla &nbsp; [![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Statistical%20Forecasting%20Algorithms%20by%20Nixtla%20&url=https://github.com/Nixtla/neuralforecast&via=nixtlainc&hashtags=StatisticalModels,TimeSeries,Forecasting) &nbsp;[![Slack](https://img.shields.io/badge/Slack-4A154B?&logo=slack&logoColor=white)](https://join.slack.com/t/nixtlacommunity/shared_invite/zt-1pmhan9j5-F54XR20edHk0UtYAPcW4KQ)
<div align="center">
<img src="https://raw.githubusercontent.com/Nixtla/neuralforecast/main/nbs/imgs_indx/logo_new.png">
<h1 align="center">Neural 🧠 Forecast</h1>
<h3 align="center">User friendly state-of-the-art neural forecasting models</h3>
[![CI](https://github.com/Nixtla/neuralforecast/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/Nixtla/neuralforecast/actions/workflows/ci.yaml)
[![Python](https://img.shields.io/pypi/pyversions/neuralforecast)](https://pypi.org/project/neuralforecast/)
[![PyPi](https://img.shields.io/pypi/v/neuralforecast?color=blue)](https://pypi.org/project/neuralforecast/)
[![conda-nixtla](https://img.shields.io/conda/vn/conda-forge/neuralforecast?color=seagreen&label=conda)](https://anaconda.org/conda-forge/neuralforecast)
[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Nixtla/neuralforecast/blob/main/LICENSE)
[![docs](https://img.shields.io/website-up-down-green-red/http/nixtla.github.io/neuralforecast.svg?label=docs)](https://nixtla.github.io/neuralforecast/)
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-11-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
**NeuralForecast** offers a large collection of neural forecasting models focusing on their performance, usability, and robustness. The models range from classic networks like `RNN` to the latest transformers: `MLP`, `LSTM`, `GRU`, `RNN`, `TCN`, `DeepAR`, `NBEATS`, `NBEATSx`, `NHITS`, `DLinear`, `NLinear`, `TFT`, `Informer`, `AutoFormer`, `FedFormer`, `PatchTST`, `StemGNN`, and `TimesNet`.
</div>
## Installation
You can install `NeuralForecast` with:
```bash
pip install neuralforecast
```
or
```bash
conda install -c conda-forge neuralforecast
```
Visit our [Installation Guide](https://nixtla.github.io/neuralforecast/examples/installation.html) for further details.
## Quick Start
**Minimal Example**
```python
from neuralforecast import NeuralForecast
from neuralforecast.models import NBEATS
from neuralforecast.utils import AirPassengersDF
nf = NeuralForecast(
models = [NBEATS(input_size=24, h=12, max_steps=100)],
freq = 'M'
)
nf.fit(df=AirPassengersDF)
nf.predict()
```
**Get Started with this [quick guide](https://nixtla.github.io/neuralforecast/examples/getting_started.html).**
## Why?
There is a shared belief in neural forecasting methods' capacity to improve the accuracy and efficiency of forecasting pipelines.
Unfortunately, available implementations and published research have yet to realize neural networks' potential. They are hard to use and consistently fail to improve over statistical methods while being computationally prohibitive. For this reason, we created `NeuralForecast`, a library favoring proven accurate and efficient models, with a focus on their usability.
## Features
* Fast and accurate implementations of `MLP`, `LSTM`, `GRU`, `RNN`, `TCN`, `DeepAR`, `NBEATS`, `NBEATSx`, `NHITS`, `DLinear`, `TFT`, `Informer`, `AutoFormer`, `FedFormer`, `PatchTST`, `StemGNN`, and `TimesNet`. See the entire [collection here](https://nixtla.github.io/neuralforecast/models.html).
* Support for exogenous variables and static covariates.
* Interpretability methods for trend, seasonality and exogenous components.
* Probabilistic Forecasting with adapters for quantile losses and parametric distributions (see the sketch after this list).
* Train and Evaluation Losses with scale-dependent, percentage, and scale-independent errors, and parametric likelihoods.
* Automatic Model Selection with distributed automatic hyperparameter tuning.
* Familiar sklearn syntax: `.fit` and `.predict`.
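The sketch referenced in the list above swaps the point loss for a multi-quantile loss to obtain prediction intervals. It is illustrative only: `MQLoss` and the quantile output columns follow current neuralforecast conventions and may differ between releases.
```python
# Minimal sketch: probabilistic forecasting through a quantile-loss adapter.
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS
from neuralforecast.losses.pytorch import MQLoss
from neuralforecast.utils import AirPassengersDF

model = NHITS(h=12, input_size=24, loss=MQLoss(level=[80, 90]), max_steps=100)
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()   # median plus lower/upper quantile columns for each level
print(forecasts.columns)
```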
## Highlights
* Official `NHITS` implementation, published at AAAI 2023. See [paper](https://ojs.aaai.org/index.php/AAAI/article/view/25854) and [experiments](./experiments/).
* Official `NBEATSx` implementation, published at the International Journal of Forecasting. See [paper](https://www.sciencedirect.com/science/article/pii/S0169207022000413).
* Unified interface with `StatsForecast`, `MLForecast`, and `HierarchicalForecast`: `NeuralForecast().fit(Y_df).predict()`, with shared inputs and outputs.
* Built-in integrations with `utilsforecast` and `coreforecast` for efficient visualization and data-wrangling methods.
* Integrations with `Ray` and `Optuna` for automatic hyperparameter optimization (see the `Auto` model sketch after this list).
* Predict with little to no history using transfer learning. Check the experiments [here](https://github.com/Nixtla/transfer-learning-time-series).
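The `Auto` model sketch referenced above is condensed from the pattern used in this repository's `action_files/test_models/src/models.py`; the dataset and search-space values are illustrative.
```python
# Condensed sketch of automatic model selection with a Ray Tune search space.
from ray import tune
from neuralforecast import NeuralForecast
from neuralforecast.auto import AutoNHITS
from neuralforecast.losses.pytorch import MAE
from neuralforecast.utils import AirPassengersDF

horizon = 12
config = {
    "input_size": tune.choice([2 * horizon]),
    "max_steps": 100,
    "val_check_steps": 50,
    "random_seed": tune.choice([1, 2, 3]),
}
model = AutoNHITS(h=horizon, loss=MAE(), config=config, num_samples=2, cpus=1)
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
print(nf.predict().head())
```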
Missing something? Please open an issue or write us in [![Slack](https://img.shields.io/badge/Slack-4A154B?&logo=slack&logoColor=white)](https://join.slack.com/t/nixtlaworkspace/shared_invite/zt-135dssye9-fWTzMpv2WBthq8NK0Yvu6A)
## Examples and Guides
The [documentation page](https://nixtla.github.io/neuralforecast/) contains all the examples and tutorials.
📈 [Automatic Hyperparameter Optimization](https://nixtla.github.io/neuralforecast/examples/automatic_hyperparameter_tuning.html): Easy and Scalable Automatic Hyperparameter Optimization with `Auto` models on `Ray` or `Optuna`.
🌡️ [Exogenous Regressors](https://nixtla.github.io/neuralforecast/examples/exogenous_variables.html): How to incorporate static or temporal exogenous covariates like weather or prices (a short sketch follows this list of guides).
🔌 [Transformer Models](https://nixtla.github.io/neuralforecast/examples/longhorizon_with_transformers.html): Learn how to forecast with many state-of-the-art Transformer models.
👑 [Hierarchical Forecasting](https://nixtla.github.io/neuralforecast/examples/hierarchicalnetworks.html): forecast series with very few non-zero observations.
👩‍🔬 [Add Your Own Model](https://nixtla.github.io/neuralforecast/examples/how_to_add_models.html): Learn how to add a new model to the library.
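The exogenous-covariates sketch referenced above is a toy example rather than an excerpt from the guides: the synthetic data, column names, and the `futr_exog_list`/`stat_exog_list` arguments reflect current neuralforecast usage and may differ between versions.
```python
# Toy sketch: temporal and static exogenous covariates with NHITS.
import numpy as np
import pandas as pd
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS

dates = pd.date_range('2020-01-01', periods=120, freq='D')
df = pd.DataFrame({
    'unique_id': 'store_1',
    'ds': dates,
    'y': np.sin(np.arange(120) / 7) + np.random.normal(scale=0.1, size=120),
    'price': np.linspace(1.0, 2.0, 120),   # exogenous regressor known into the future
})
static_df = pd.DataFrame({'unique_id': ['store_1'], 'region': [0]})  # one row per series

model = NHITS(h=14, input_size=28,
              futr_exog_list=['price'],    # covariates whose future values are supplied at predict time
              stat_exog_list=['region'],   # static covariates
              max_steps=100)
nf = NeuralForecast(models=[model], freq='D')
nf.fit(df=df, static_df=static_df)

# future values of the exogenous covariate for the 14-step horizon
futr_df = pd.DataFrame({
    'unique_id': 'store_1',
    'ds': pd.date_range(dates[-1] + pd.Timedelta(days=1), periods=14, freq='D'),
    'price': np.linspace(2.0, 2.1, 14),
})
forecasts = nf.predict(futr_df=futr_df)
print(forecasts.head())
```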
## Models
|Model | Structure | Sampling | Point Forecast | Probabilistic Forecast | Exogenous features | `Auto` Model
|:------|:-------------:|:-----------------:|:---------------------:|:----------------------------:|:---------------------:|:---------------------:|
|[LSTM](https://nixtlaverse.nixtla.io/neuralforecast/models.lstm.html)| RNN | recurrent |✅|✅|✅|✅|
|[GRU](https://nixtlaverse.nixtla.io/neuralforecast/models.gru.html)| RNN | recurrent |✅|✅|✅|✅|
|[RNN](https://nixtlaverse.nixtla.io/neuralforecast/models.rnn.html)| RNN | recurrent |✅|✅|✅|✅|
|[DilatedRNN](https://nixtlaverse.nixtla.io/neuralforecast/models.dilated_rnn.html)| RNN | recurrent |✅|✅|✅|✅|
|[DeepAR](https://nixtlaverse.nixtla.io/neuralforecast/models.deepar.html)| RNN | recurrent ||✅|✅|✅|
|[TCN](https://nixtlaverse.nixtla.io/neuralforecast/models.tcn.html)| CNN | recurrent |✅|✅|✅|✅|
|[TimesNet](https://nixtlaverse.nixtla.io/neuralforecast/models.timesnet.html)| CNN | windows |✅|✅||✅|
|[DLinear](https://nixtlaverse.nixtla.io/neuralforecast/models.dlinear.html)| Linear | windows |✅|✅||✅|
|[MLP](https://nixtlaverse.nixtla.io/neuralforecast/models.mlp.html)| MLP | windows |✅|✅|✅|✅|
|[NBEATS](https://nixtlaverse.nixtla.io/neuralforecast/models.nbeats.html)| MLP | windows |✅|✅||✅|
|[NBEATSx](https://nixtlaverse.nixtla.io/neuralforecast/models.nbeatsx.html)| MLP | windows |✅|✅|✅|✅|
|[NHITS](https://nixtlaverse.nixtla.io/neuralforecast/models.nhits.html)| MLP | windows |✅|✅|✅|✅|
|[TFT](https://nixtlaverse.nixtla.io/neuralforecast/models.tft.html)| Transformer | windows |✅|✅|✅|✅|
|[Transformer](https://nixtlaverse.nixtla.io/neuralforecast/models.vanillatransformer.html)| Transformer | windows |✅|✅|✅|✅|
|[Informer](https://nixtlaverse.nixtla.io/neuralforecast/models.informer.html)| Transformer | windows |✅|✅|✅|✅|
|[Autoformer](https://nixtlaverse.nixtla.io/neuralforecast/models.autoformer.html)| Transformer | windows |✅|✅|✅|✅|
|[FEDFormer](https://nixtlaverse.nixtla.io/neuralforecast/models.fedformer.html)| Transformer | windows |✅|✅|✅|✅|
|[PatchTST](https://nixtlaverse.nixtla.io/neuralforecast/models.patchtst.html)| Transformer | windows |✅|✅||✅|
|[StemGNN](https://nixtlaverse.nixtla.io/neuralforecast/models.stemgnn.html)| GNN | multivariate |✅|||✅|
Missing a model? Please open an issue or write us in [![Slack](https://img.shields.io/badge/Slack-4A154B?&logo=slack&logoColor=white)](https://join.slack.com/t/nixtlaworkspace/shared_invite/zt-135dssye9-fWTzMpv2WBthq8NK0Yvu6A)
## How to contribute
If you wish to contribute to the project, please refer to our [contribution guidelines](https://github.com/Nixtla/neuralforecast/blob/main/CONTRIBUTING.md).
## References
This work is highly influenced by the fantastic work of previous contributors and other scholars on the neural forecasting methods presented here. We want to highlight the work of [Boris Oreshkin](https://arxiv.org/abs/1905.10437), [Slawek Smyl](https://www.sciencedirect.com/science/article/pii/S0169207019301153), [Bryan Lim](https://www.sciencedirect.com/science/article/pii/S0169207021000637), and [David Salinas](https://arxiv.org/abs/1704.04110). We refer to [Benidis et al.](https://arxiv.org/abs/2004.10240) for a comprehensive survey of neural forecasting methods.
## Contributors ✨
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tbody>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/FedericoGarza"><img src="https://avatars.githubusercontent.com/u/10517170?v=4?s=100" width="100px;" alt="fede"/><br /><sub><b>fede</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=FedericoGarza" title="Code">💻</a> <a href="#maintenance-FedericoGarza" title="Maintenance">🚧</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/cchallu"><img src="https://avatars.githubusercontent.com/u/31133398?v=4?s=100" width="100px;" alt="Cristian Challu"/><br /><sub><b>Cristian Challu</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=cchallu" title="Code">💻</a> <a href="#maintenance-cchallu" title="Maintenance">🚧</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/jmoralez"><img src="https://avatars.githubusercontent.com/u/8473587?v=4?s=100" width="100px;" alt="José Morales"/><br /><sub><b>José Morales</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=jmoralez" title="Code">💻</a> <a href="#maintenance-jmoralez" title="Maintenance">🚧</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/mergenthaler"><img src="https://avatars.githubusercontent.com/u/4086186?v=4?s=100" width="100px;" alt="mergenthaler"/><br /><sub><b>mergenthaler</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=mergenthaler" title="Documentation">📖</a> <a href="https://github.com/Nixtla/neuralforecast/commits?author=mergenthaler" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/kdgutier"><img src="https://avatars.githubusercontent.com/u/19935241?v=4?s=100" width="100px;" alt="Kin"/><br /><sub><b>Kin</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=kdgutier" title="Code">💻</a> <a href="https://github.com/Nixtla/neuralforecast/issues?q=author%3Akdgutier" title="Bug reports">🐛</a> <a href="#data-kdgutier" title="Data">🔣</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/gdevos010"><img src="https://avatars.githubusercontent.com/u/15316026?v=4?s=100" width="100px;" alt="Greg DeVos"/><br /><sub><b>Greg DeVos</b></sub></a><br /><a href="#ideas-gdevos010" title="Ideas, Planning, & Feedback">🤔</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/alejandroxag"><img src="https://avatars.githubusercontent.com/u/64334543?v=4?s=100" width="100px;" alt="Alejandro"/><br /><sub><b>Alejandro</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/commits?author=alejandroxag" title="Code">💻</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="http://lavattiata.com"><img src="https://avatars.githubusercontent.com/u/48966177?v=4?s=100" width="100px;" alt="stefanialvs"/><br /><sub><b>stefanialvs</b></sub></a><br /><a href="#design-stefanialvs" title="Design">🎨</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://bandism.net/"><img src="https://avatars.githubusercontent.com/u/22633385?v=4?s=100" width="100px;" alt="Ikko Ashimine"/><br /><sub><b>Ikko Ashimine</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/issues?q=author%3Aeltociear" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/vglaucus"><img src="https://avatars.githubusercontent.com/u/75549033?v=4?s=100" width="100px;" alt="vglaucus"/><br /><sub><b>vglaucus</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/issues?q=author%3Avglaucus" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/pitmonticone"><img src="https://avatars.githubusercontent.com/u/38562595?v=4?s=100" width="100px;" alt="Pietro Monticone"/><br /><sub><b>Pietro Monticone</b></sub></a><br /><a href="https://github.com/Nixtla/neuralforecast/issues?q=author%3Apitmonticone" title="Bug reports">🐛</a></td>
</tr>
</tbody>
</table>
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
import sys
import warnings
from pathlib import Path
from nbdev.processors import NBProcessor, _do_eval
def check_nb(nb_path: str) -> None:
with warnings.catch_warnings(record=True) as issued_warnings:
NBProcessor(nb_path, _do_eval, process=True)
if any(
"Found cells containing imports and other code" in str(w)
for w in issued_warnings
):
print(f"{nb_path} has cells containing imports and code.")
sys.exit(1)
if __name__ == "__main__":
repo_root = Path(__file__).parents[1]
for nb_path in (repo_root / "nbs").glob("*.ipynb"):
check_nb(str(nb_path))
fire
datasetsforecast
import fire
import pandas as pd
from datasetsforecast.m3 import M3, M3Info
dict_datasets = {
'M3': (M3, M3Info),
}
def get_data(directory: str, dataset: str, group: str, train: bool = True):
if dataset not in dict_datasets.keys():
raise Exception(f'dataset {dataset} not found')
dataclass, datainfo = dict_datasets[dataset]
if group not in datainfo.groups:
raise Exception(f'group {group} not found for {dataset}')
Y_df, *_ = dataclass.load(directory, group)
horizon = datainfo[group].horizon
freq = datainfo[group].freq
seasonality = datainfo[group].seasonality
Y_df_test = Y_df.groupby('unique_id').tail(horizon)
Y_df = Y_df.drop(Y_df_test.index)
if train:
return Y_df, horizon, freq, seasonality
return Y_df_test, horizon, freq, seasonality
def save_data(dataset: str, group: str, train: bool = True):
df, *_ = get_data('data', dataset, group, train)
if train:
df.to_csv(f'data/{dataset}-{group}.csv', index=False)
else:
df.to_csv(f'data/{dataset}-{group}-test.csv', index=False)
if __name__=="__main__":
fire.Fire(save_data)
from itertools import product
import numpy as np
import pandas as pd
from src.data import get_data
def mae(y, y_hat, axis):
delta_y = np.abs(y - y_hat)
mae = np.average(delta_y, axis=axis)
return mae
def smape(y, y_hat, axis):
delta_y = np.abs(y - y_hat)
scale = np.abs(y) + np.abs(y_hat)
smape = delta_y / scale
smape = 200 * np.average(smape, axis=axis)
return smape
def evaluate(model: str, dataset: str, group: str):
try:
forecast = pd.read_csv(f'data/{model}-forecasts-{dataset}-{group}.csv')
    except FileNotFoundError:  # skip models whose forecasts were not produced
return None
y_test, horizon, freq, seasonality = get_data('data/', dataset, group, False)
y_hat = forecast[model].values.reshape(-1, horizon)
y_test = y_test['y'].values.reshape(-1, horizon)
evals = {}
for metric in (mae, smape):
metric_name = metric.__name__
loss = metric(y_test, y_hat, axis=1).mean()
evals[metric_name] = loss
evals = pd.DataFrame(evals, index=[f'{dataset}_{group}']).rename_axis('dataset').reset_index()
times = pd.read_csv(f'data/{model}-time-{dataset}-{group}.csv')
evals = pd.concat([evals, times], axis=1)
return evals
if __name__ == '__main__':
groups = ['Monthly']
models = ['AutoDilatedRNN', 'RNN', 'TCN', 'DeepAR',
'NHITS', 'TFT', 'AutoMLP', 'DLinear', 'VanillaTransformer']
datasets = ['M3']
evaluation = [evaluate(model, dataset, group) for model, group in product(models, groups) for dataset in datasets]
evaluation = [eval_ for eval_ in evaluation if eval_ is not None]
evaluation = pd.concat(evaluation)
evaluation = evaluation[['dataset', 'model', 'time', 'mae', 'smape']]
evaluation['time'] /= 60 #minutes
evaluation = evaluation.set_index(['dataset', 'model']).stack().reset_index()
evaluation.columns = ['dataset', 'model', 'metric', 'val']
evaluation = evaluation.set_index(['dataset', 'metric', 'model']).unstack().round(3)
evaluation = evaluation.droplevel(0, 1).reset_index()
evaluation['AutoARIMA'] = [666.82, 15.35, 3.000]
evaluation.to_csv('data/evaluation.csv')
print(evaluation.T)
from itertools import product
import numpy as np
import pandas as pd
from src.data import get_data
def mae(y, y_hat, axis):
delta_y = np.abs(y - y_hat)
mae = np.average(delta_y, axis=axis)
return mae
def smape(y, y_hat, axis):
delta_y = np.abs(y - y_hat)
scale = np.abs(y) + np.abs(y_hat)
smape = delta_y / scale
smape = 200 * np.average(smape, axis=axis)
return smape
def evaluate(model: str, dataset: str, group: str):
try:
forecast = pd.read_csv(f'data/{model}-forecasts-{dataset}-{group}.csv')
    except FileNotFoundError:  # skip models whose forecasts were not produced
return None
y_test, horizon, freq, seasonality = get_data('data/', dataset, group, False)
y_hat = forecast[model].values.reshape(-1, horizon)
y_test = y_test['y'].values.reshape(-1, horizon)
evals = {}
for metric in (mae, smape):
metric_name = metric.__name__
loss = metric(y_test, y_hat, axis=1).mean()
evals[metric_name] = loss
evals = pd.DataFrame(evals, index=[f'{dataset}_{group}']).rename_axis('dataset').reset_index()
times = pd.read_csv(f'data/{model}-time-{dataset}-{group}.csv')
evals = pd.concat([evals, times], axis=1)
return evals
if __name__ == '__main__':
groups = ['Monthly']
models = ['LSTM', 'DilatedRNN', 'GRU', 'NBEATSx',
'PatchTST', 'AutoNHITS', 'AutoNBEATS']
datasets = ['M3']
evaluation = [evaluate(model, dataset, group) for model, group in product(models, groups) for dataset in datasets]
evaluation = [eval_ for eval_ in evaluation if eval_ is not None]
evaluation = pd.concat(evaluation)
evaluation = evaluation[['dataset', 'model', 'time', 'mae', 'smape']]
evaluation['time'] /= 60 #minutes
evaluation = evaluation.set_index(['dataset', 'model']).stack().reset_index()
evaluation.columns = ['dataset', 'model', 'metric', 'val']
evaluation = evaluation.set_index(['dataset', 'metric', 'model']).unstack().round(3)
evaluation = evaluation.droplevel(0, 1).reset_index()
evaluation['AutoARIMA'] = [666.82, 15.35, 3.000]
evaluation.to_csv('data/evaluation.csv')
print(evaluation.T)
import os
import time
import fire
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import neuralforecast
from neuralforecast.core import NeuralForecast
from neuralforecast.models.gru import GRU
from neuralforecast.models.rnn import RNN
from neuralforecast.models.tcn import TCN
from neuralforecast.models.lstm import LSTM
from neuralforecast.models.dilated_rnn import DilatedRNN
from neuralforecast.models.deepar import DeepAR
from neuralforecast.models.mlp import MLP
from neuralforecast.models.nhits import NHITS
from neuralforecast.models.nbeats import NBEATS
from neuralforecast.models.nbeatsx import NBEATSx
from neuralforecast.models.tft import TFT
from neuralforecast.models.vanillatransformer import VanillaTransformer
from neuralforecast.models.informer import Informer
from neuralforecast.models.autoformer import Autoformer
from neuralforecast.models.patchtst import PatchTST
from neuralforecast.models.dlinear import DLinear
from neuralforecast.auto import (
AutoMLP, AutoNHITS, AutoNBEATS, AutoDilatedRNN, AutoTFT
)
from neuralforecast.losses.pytorch import SMAPE, MAE
from ray import tune
from src.data import get_data
os.environ['NIXTLA_ID_AS_COL'] = '1'
def main(dataset: str = 'M3', group: str = 'Monthly') -> None:
train, horizon, freq, seasonality = get_data('data/', dataset, group)
train['ds'] = pd.to_datetime(train['ds'])
config_nbeats = {
"input_size": tune.choice([2 * horizon]),
"max_steps": 1000,
"val_check_steps": 300,
"scaler_type": "minmax1",
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
}
config = {
"hidden_size": tune.choice([256, 512]),
"num_layers": tune.choice([2, 4]),
"input_size": tune.choice([2 * horizon]),
"max_steps": 1000,
"val_check_steps": 300,
"scaler_type": "minmax1",
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
}
config_drnn = {'input_size': tune.choice([2 * horizon]),
'encoder_hidden_size': tune.choice([124]),
"max_steps": 300,
"val_check_steps": 100,
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),}
models = [
AutoDilatedRNN(h=horizon, loss=MAE(), config=config_drnn, num_samples=2, cpus=1),
RNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300),
TCN(h=horizon, input_size=2 * horizon, encoder_hidden_size=20, max_steps=300),
NHITS(h=horizon, input_size=2 * horizon, dropout_prob_theta=0.5, loss=MAE(), max_steps=1000, val_check_steps=500),
AutoMLP(h=horizon, loss=MAE(), config=config, num_samples=2, cpus=1),
DLinear(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=2000, val_check_steps=500),
TFT(h=horizon, input_size=2 * horizon, loss=SMAPE(), hidden_size=64, scaler_type='robust', windows_batch_size=512, max_steps=1500, val_check_steps=500),
VanillaTransformer(h=horizon, input_size=2 * horizon, loss=MAE(), hidden_size=64, scaler_type='minmax1', windows_batch_size=512, max_steps=1500, val_check_steps=500),
DeepAR(h=horizon, input_size=2 * horizon, scaler_type='minmax1', max_steps=1000),
]
# Models
for model in models[:-1]:
model_name = type(model).__name__
print(50*'-', model_name, 50*'-')
start = time.time()
fcst = NeuralForecast(models=[model], freq=freq)
fcst.fit(train)
forecasts = fcst.predict()
end = time.time()
print(end - start)
forecasts.columns = ['unique_id', 'ds', model_name]
forecasts.to_csv(f'data/{model_name}-forecasts-{dataset}-{group}.csv', index=False)
time_df = pd.DataFrame({'time': [end - start], 'model': [model_name]})
time_df.to_csv(f'data/{model_name}-time-{dataset}-{group}.csv', index=False)
# DeepAR
model_name = type(models[-1]).__name__
start = time.time()
fcst = NeuralForecast(models=[models[-1]], freq=freq)
fcst.fit(train)
forecasts = fcst.predict()
end = time.time()
print(end - start)
forecasts = forecasts[['unique_id', 'ds', 'DeepAR-median']]
forecasts.columns = ['unique_id', 'ds', 'DeepAR']
forecasts.to_csv(f'data/{model_name}-forecasts-{dataset}-{group}.csv', index=False)
time_df = pd.DataFrame({'time': [end - start], 'model': [model_name]})
time_df.to_csv(f'data/{model_name}-time-{dataset}-{group}.csv', index=False)
if __name__ == '__main__':
fire.Fire(main)
import os
import time
import fire
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import neuralforecast
from neuralforecast.core import NeuralForecast
from neuralforecast.models.gru import GRU
from neuralforecast.models.rnn import RNN
from neuralforecast.models.tcn import TCN
from neuralforecast.models.lstm import LSTM
from neuralforecast.models.dilated_rnn import DilatedRNN
from neuralforecast.models.deepar import DeepAR
from neuralforecast.models.mlp import MLP
from neuralforecast.models.nhits import NHITS
from neuralforecast.models.nbeats import NBEATS
from neuralforecast.models.nbeatsx import NBEATSx
from neuralforecast.models.tft import TFT
from neuralforecast.models.vanillatransformer import VanillaTransformer
from neuralforecast.models.informer import Informer
from neuralforecast.models.autoformer import Autoformer
from neuralforecast.models.patchtst import PatchTST
from neuralforecast.auto import (
AutoMLP, AutoNHITS, AutoNBEATS, AutoDilatedRNN, AutoTFT
)
from neuralforecast.losses.pytorch import SMAPE, MAE
from ray import tune
from src.data import get_data
os.environ['NIXTLA_ID_AS_COL'] = '1'
def main(dataset: str = 'M3', group: str = 'Monthly') -> None:
train, horizon, freq, seasonality = get_data('data/', dataset, group)
train['ds'] = pd.to_datetime(train['ds'])
config_nbeats = {
"input_size": tune.choice([2 * horizon]),
"max_steps": 1000,
"val_check_steps": 300,
"scaler_type": "minmax1",
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
}
config = {
"hidden_size": tune.choice([256, 512]),
"num_layers": tune.choice([2, 4]),
"input_size": tune.choice([2 * horizon]),
"max_steps": 1000,
"val_check_steps": 300,
"scaler_type": "minmax1",
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
}
config_drnn = {'input_size': tune.choice([2 * horizon]),
'encoder_hidden_size': tune.choice([124]),
"max_steps": 300,
"val_check_steps": 100,
"random_seed": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),}
models = [
LSTM(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300),
DilatedRNN(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300),
GRU(h=horizon, input_size=2 * horizon, encoder_hidden_size=50, max_steps=300),
AutoNBEATS(h=horizon, loss=MAE(), config=config_nbeats, num_samples=2, cpus=1),
AutoNHITS(h=horizon, loss=MAE(), config=config_nbeats, num_samples=2, cpus=1),
NBEATSx(h=horizon, input_size=2 * horizon, loss=MAE(), max_steps=1000),
PatchTST(h=horizon, input_size=2 * horizon, patch_len=4, stride=4, loss=MAE(), scaler_type='minmax1', windows_batch_size=512, max_steps=1000, val_check_steps=500),
]
# Models
    for model in models:  # unlike models.py, this script has no special-cased last model, so run them all
model_name = type(model).__name__
print(50*'-', model_name, 50*'-')
start = time.time()
fcst = NeuralForecast(models=[model], freq=freq)
fcst.fit(train)
forecasts = fcst.predict()
end = time.time()
print(end - start)
forecasts.columns = ['unique_id', 'ds', model_name]
forecasts.to_csv(f'data/{model_name}-forecasts-{dataset}-{group}.csv', index=False)
time_df = pd.DataFrame({'time': [end - start], 'model': [model_name]})
time_df.to_csv(f'data/{model_name}-time-{dataset}-{group}.csv', index=False)
if __name__ == '__main__':
fire.Fire(main)